lightnvm: export set bad block table
[karo-tx-linux.git] / drivers / lightnvm / rrpc.c
1 /*
2  * Copyright (C) 2015 IT University of Copenhagen
3  * Initial release: Matias Bjorling <m@bjorling.me>
4  *
5  * This program is free software; you can redistribute it and/or
6  * modify it under the terms of the GNU General Public License version
7  * 2 as published by the Free Software Foundation.
8  *
9  * This program is distributed in the hope that it will be useful, but
10  * WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
12  * General Public License for more details.
13  *
14  * Implementation of a Round-robin page-based Hybrid FTL for Open-channel SSDs.
15  */
16
17 #include "rrpc.h"
18
19 static struct kmem_cache *rrpc_gcb_cache, *rrpc_rq_cache;
20 static DECLARE_RWSEM(rrpc_lock);
21
22 static int rrpc_submit_io(struct rrpc *rrpc, struct bio *bio,
23                                 struct nvm_rq *rqd, unsigned long flags);
24
25 #define rrpc_for_each_lun(rrpc, rlun, i) \
26                 for ((i) = 0, rlun = &(rrpc)->luns[0]; \
27                         (i) < (rrpc)->nr_luns; (i)++, rlun = &(rrpc)->luns[(i)])
28
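/* Mark the sector behind @a invalid in its block and clear its reverse map
 * entry. Caller must hold rrpc->rev_lock.
 */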
29 static void rrpc_page_invalidate(struct rrpc *rrpc, struct rrpc_addr *a)
30 {
31         struct rrpc_block *rblk = a->rblk;
32         unsigned int pg_offset;
33
34         lockdep_assert_held(&rrpc->rev_lock);
35
36         if (a->addr == ADDR_EMPTY || !rblk)
37                 return;
38
39         spin_lock(&rblk->lock);
40
41         div_u64_rem(a->addr, rrpc->dev->sec_per_blk, &pg_offset);
42         WARN_ON(test_and_set_bit(pg_offset, rblk->invalid_pages));
43         rblk->nr_invalid_pages++;
44
45         spin_unlock(&rblk->lock);
46
47         rrpc->rev_trans_map[a->addr - rrpc->poffset].addr = ADDR_EMPTY;
48 }
49
50 static void rrpc_invalidate_range(struct rrpc *rrpc, sector_t slba,
51                                                         unsigned int len)
52 {
53         sector_t i;
54
55         spin_lock(&rrpc->rev_lock);
56         for (i = slba; i < slba + len; i++) {
57                 struct rrpc_addr *gp = &rrpc->trans_map[i];
58
59                 rrpc_page_invalidate(rrpc, gp);
60                 gp->rblk = NULL;
61         }
62         spin_unlock(&rrpc->rev_lock);
63 }
64
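/* Allocate a request and lock the logical address range [laddr, laddr + pages).
 * Returns ERR_PTR(-ENOMEM) on allocation failure and NULL if the range is
 * already in flight.
 */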
65 static struct nvm_rq *rrpc_inflight_laddr_acquire(struct rrpc *rrpc,
66                                         sector_t laddr, unsigned int pages)
67 {
68         struct nvm_rq *rqd;
69         struct rrpc_inflight_rq *inf;
70
71         rqd = mempool_alloc(rrpc->rq_pool, GFP_ATOMIC);
72         if (!rqd)
73                 return ERR_PTR(-ENOMEM);
74
75         inf = rrpc_get_inflight_rq(rqd);
76         if (rrpc_lock_laddr(rrpc, laddr, pages, inf)) {
77                 mempool_free(rqd, rrpc->rq_pool);
78                 return NULL;
79         }
80
81         return rqd;
82 }
83
84 static void rrpc_inflight_laddr_release(struct rrpc *rrpc, struct nvm_rq *rqd)
85 {
86         struct rrpc_inflight_rq *inf = rrpc_get_inflight_rq(rqd);
87
88         rrpc_unlock_laddr(rrpc, inf);
89
90         mempool_free(rqd, rrpc->rq_pool);
91 }
92
93 static void rrpc_discard(struct rrpc *rrpc, struct bio *bio)
94 {
95         sector_t slba = bio->bi_iter.bi_sector / NR_PHY_IN_LOG;
96         sector_t len = bio->bi_iter.bi_size / RRPC_EXPOSED_PAGE_SIZE;
97         struct nvm_rq *rqd;
98
99         while (1) {
100                 rqd = rrpc_inflight_laddr_acquire(rrpc, slba, len);
101                 if (rqd)
102                         break;
103
104                 schedule();
105         }
106
107         if (IS_ERR(rqd)) {
108                 pr_err("rrpc: unable to acquire inflight IO\n");
109                 bio_io_error(bio);
110                 return;
111         }
112
113         rrpc_invalidate_range(rrpc, slba, len);
114         rrpc_inflight_laddr_release(rrpc, rqd);
115 }
116
117 static int block_is_full(struct rrpc *rrpc, struct rrpc_block *rblk)
118 {
119         return (rblk->next_page == rrpc->dev->sec_per_blk);
120 }
121
122 /* Calculate relative addr for the given block, considering instantiated LUNs */
123 static u64 block_to_rel_addr(struct rrpc *rrpc, struct rrpc_block *rblk)
124 {
125         struct nvm_block *blk = rblk->parent;
126         int lun_blk = blk->id % (rrpc->dev->blks_per_lun * rrpc->nr_luns);
127
128         return lun_blk * rrpc->dev->sec_per_blk;
129 }
130
131 /* Calculate global addr for the given block */
132 static u64 block_to_addr(struct rrpc *rrpc, struct rrpc_block *rblk)
133 {
134         struct nvm_block *blk = rblk->parent;
135
136         return blk->id * rrpc->dev->sec_per_blk;
137 }
138
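/* Split a linear sector address into the sector/page/block/lun/channel
 * components of the device's generic address format.
 */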
139 static struct ppa_addr linear_to_generic_addr(struct nvm_dev *dev,
140                                                         struct ppa_addr r)
141 {
142         struct ppa_addr l;
143         int secs, pgs, blks, luns;
144         sector_t ppa = r.ppa;
145
146         l.ppa = 0;
147
148         div_u64_rem(ppa, dev->sec_per_pg, &secs);
149         l.g.sec = secs;
150
151         sector_div(ppa, dev->sec_per_pg);
152         div_u64_rem(ppa, dev->pgs_per_blk, &pgs);
153         l.g.pg = pgs;
154
155         sector_div(ppa, dev->pgs_per_blk);
156         div_u64_rem(ppa, dev->blks_per_lun, &blks);
157         l.g.blk = blks;
158
159         sector_div(ppa, dev->blks_per_lun);
160         div_u64_rem(ppa, dev->luns_per_chnl, &luns);
161         l.g.lun = luns;
162
163         sector_div(ppa, dev->luns_per_chnl);
164         l.g.ch = ppa;
165
166         return l;
167 }
168
169 static struct ppa_addr rrpc_ppa_to_gaddr(struct nvm_dev *dev, u64 addr)
170 {
171         struct ppa_addr paddr;
172
173         paddr.ppa = addr;
174         return linear_to_generic_addr(dev, paddr);
175 }
176
177 /* requires lun->lock taken */
178 static void rrpc_set_lun_cur(struct rrpc_lun *rlun, struct rrpc_block *new_rblk,
179                                                 struct rrpc_block **cur_rblk)
180 {
181         struct rrpc *rrpc = rlun->rrpc;
182
183         if (*cur_rblk) {
184                 spin_lock(&(*cur_rblk)->lock);
185                 WARN_ON(!block_is_full(rrpc, *cur_rblk));
186                 spin_unlock(&(*cur_rblk)->lock);
187         }
188         *cur_rblk = new_rblk;
189 }
190
191 static struct rrpc_block *rrpc_get_blk(struct rrpc *rrpc, struct rrpc_lun *rlun,
192                                                         unsigned long flags)
193 {
194         struct nvm_block *blk;
195         struct rrpc_block *rblk;
196
197         blk = nvm_get_blk(rrpc->dev, rlun->parent, flags);
198         if (!blk) {
199                 pr_err("nvm: rrpc: cannot get new block from media manager\n");
200                 return NULL;
201         }
202
203         rblk = rrpc_get_rblk(rlun, blk->id);
204         blk->priv = rblk;
205         bitmap_zero(rblk->invalid_pages, rrpc->dev->sec_per_blk);
206         rblk->next_page = 0;
207         rblk->nr_invalid_pages = 0;
208         atomic_set(&rblk->data_cmnt_size, 0);
209
210         return rblk;
211 }
212
213 static void rrpc_put_blk(struct rrpc *rrpc, struct rrpc_block *rblk)
214 {
215         nvm_put_blk(rrpc->dev, rblk->parent);
216 }
217
218 static void rrpc_put_blks(struct rrpc *rrpc)
219 {
220         struct rrpc_lun *rlun;
221         int i;
222
223         for (i = 0; i < rrpc->nr_luns; i++) {
224                 rlun = &rrpc->luns[i];
225                 if (rlun->cur)
226                         rrpc_put_blk(rrpc, rlun->cur);
227                 if (rlun->gc_cur)
228                         rrpc_put_blk(rrpc, rlun->gc_cur);
229         }
230 }
231
232 static struct rrpc_lun *get_next_lun(struct rrpc *rrpc)
233 {
234         int next = atomic_inc_return(&rrpc->next_lun);
235
236         return &rrpc->luns[next % rrpc->nr_luns];
237 }
238
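/* Queue the per-LUN GC work item for every LUN. */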
239 static void rrpc_gc_kick(struct rrpc *rrpc)
240 {
241         struct rrpc_lun *rlun;
242         unsigned int i;
243
244         for (i = 0; i < rrpc->nr_luns; i++) {
245                 rlun = &rrpc->luns[i];
246                 queue_work(rrpc->krqd_wq, &rlun->ws_gc);
247         }
248 }
249
250 /*
251  * Timed GC: kick per-LUN GC and re-arm the timer every interval (10 ms).
252  */
253 static void rrpc_gc_timer(unsigned long data)
254 {
255         struct rrpc *rrpc = (struct rrpc *)data;
256
257         rrpc_gc_kick(rrpc);
258         mod_timer(&rrpc->gc_timer, jiffies + msecs_to_jiffies(10));
259 }
260
261 static void rrpc_end_sync_bio(struct bio *bio)
262 {
263         struct completion *waiting = bio->bi_private;
264
265         if (bio->bi_error)
266                 pr_err("nvm: gc request failed (%u).\n", bio->bi_error);
267
268         complete(waiting);
269 }
270
271 /*
272  * rrpc_move_valid_pages -- migrate live data off the block
273  * @rrpc: the 'rrpc' structure
274  * @block: the block from which to migrate live pages
275  *
276  * Description:
277  *   GC algorithms may call this function to migrate remaining live
278  *   pages off the block prior to erasing it. This function blocks
279  *   further execution until the operation is complete.
280  */
281 static int rrpc_move_valid_pages(struct rrpc *rrpc, struct rrpc_block *rblk)
282 {
283         struct request_queue *q = rrpc->dev->q;
284         struct rrpc_rev_addr *rev;
285         struct nvm_rq *rqd;
286         struct bio *bio;
287         struct page *page;
288         int slot;
289         int nr_sec_per_blk = rrpc->dev->sec_per_blk;
290         u64 phys_addr;
291         DECLARE_COMPLETION_ONSTACK(wait);
292
293         if (bitmap_full(rblk->invalid_pages, nr_sec_per_blk))
294                 return 0;
295
296         bio = bio_alloc(GFP_NOIO, 1);
297         if (!bio) {
298                 pr_err("nvm: could not alloc bio to gc\n");
299                 return -ENOMEM;
300         }
301
302         page = mempool_alloc(rrpc->page_pool, GFP_NOIO);
303         if (!page) {
304                 bio_put(bio);
305                 return -ENOMEM;
306         }
307
308         while ((slot = find_first_zero_bit(rblk->invalid_pages,
309                                             nr_sec_per_blk)) < nr_sec_per_blk) {
310
311                 /* Lock laddr */
312                 phys_addr = rblk->parent->id * nr_sec_per_blk + slot;
313
314 try:
315                 spin_lock(&rrpc->rev_lock);
316                 /* Get logical address from physical to logical table */
317                 rev = &rrpc->rev_trans_map[phys_addr - rrpc->poffset];
318                 /* already updated by previous regular write */
319                 if (rev->addr == ADDR_EMPTY) {
320                         spin_unlock(&rrpc->rev_lock);
321                         continue;
322                 }
323
324                 rqd = rrpc_inflight_laddr_acquire(rrpc, rev->addr, 1);
325                 if (IS_ERR_OR_NULL(rqd)) {
326                         spin_unlock(&rrpc->rev_lock);
327                         schedule();
328                         goto try;
329                 }
330
331                 spin_unlock(&rrpc->rev_lock);
332
333                 /* Perform read to do GC */
334                 bio->bi_iter.bi_sector = rrpc_get_sector(rev->addr);
335                 bio_set_op_attrs(bio, REQ_OP_READ, 0);
336                 bio->bi_private = &wait;
337                 bio->bi_end_io = rrpc_end_sync_bio;
338
339                 /* TODO: may fail when EXP_PG_SIZE > PAGE_SIZE */
340                 bio_add_pc_page(q, bio, page, RRPC_EXPOSED_PAGE_SIZE, 0);
341
342                 if (rrpc_submit_io(rrpc, bio, rqd, NVM_IOTYPE_GC)) {
343                         pr_err("rrpc: gc read failed.\n");
344                         rrpc_inflight_laddr_release(rrpc, rqd);
345                         goto finished;
346                 }
347                 wait_for_completion_io(&wait);
348                 if (bio->bi_error) {
349                         rrpc_inflight_laddr_release(rrpc, rqd);
350                         goto finished;
351                 }
352
353                 bio_reset(bio);
354                 reinit_completion(&wait);
355
356                 bio->bi_iter.bi_sector = rrpc_get_sector(rev->addr);
357                 bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
358                 bio->bi_private = &wait;
359                 bio->bi_end_io = rrpc_end_sync_bio;
360
361                 bio_add_pc_page(q, bio, page, RRPC_EXPOSED_PAGE_SIZE, 0);
362
363                 /* turn the command around and write the data back to a new
364                  * address
365                  */
366                 if (rrpc_submit_io(rrpc, bio, rqd, NVM_IOTYPE_GC)) {
367                         pr_err("rrpc: gc write failed.\n");
368                         rrpc_inflight_laddr_release(rrpc, rqd);
369                         goto finished;
370                 }
371                 wait_for_completion_io(&wait);
372
373                 rrpc_inflight_laddr_release(rrpc, rqd);
374                 if (bio->bi_error)
375                         goto finished;
376
377                 bio_reset(bio);
378         }
379
380 finished:
381         mempool_free(page, rrpc->page_pool);
382         bio_put(bio);
383
384         if (!bitmap_full(rblk->invalid_pages, nr_sec_per_blk)) {
385                 pr_err("nvm: failed to garbage collect block\n");
386                 return -EIO;
387         }
388
389         return 0;
390 }
391
392 static void rrpc_block_gc(struct work_struct *work)
393 {
394         struct rrpc_block_gc *gcb = container_of(work, struct rrpc_block_gc,
395                                                                         ws_gc);
396         struct rrpc *rrpc = gcb->rrpc;
397         struct rrpc_block *rblk = gcb->rblk;
398         struct rrpc_lun *rlun = rblk->rlun;
399         struct nvm_dev *dev = rrpc->dev;
400
401         mempool_free(gcb, rrpc->gcb_pool);
402         pr_debug("nvm: block '%lu' being reclaimed\n", rblk->parent->id);
403
404         if (rrpc_move_valid_pages(rrpc, rblk))
405                 goto put_back;
406
407         if (nvm_erase_blk(dev, rblk->parent, 0))
408                 goto put_back;
409
410         rrpc_put_blk(rrpc, rblk);
411
412         return;
413
414 put_back:
415         spin_lock(&rlun->lock);
416         list_add_tail(&rblk->prio, &rlun->prio_list);
417         spin_unlock(&rlun->lock);
418 }
419
420 /* the block with the highest number of invalid pages will be at the beginning
421  * of the list
422  */
423 static struct rrpc_block *rblock_max_invalid(struct rrpc_block *ra,
424                                                         struct rrpc_block *rb)
425 {
426         if (ra->nr_invalid_pages == rb->nr_invalid_pages)
427                 return ra;
428
429         return (ra->nr_invalid_pages < rb->nr_invalid_pages) ? rb : ra;
430 }
431
432 /* linearly find the block with the highest number of invalid pages
433  * requires lun->lock
434  */
435 static struct rrpc_block *block_prio_find_max(struct rrpc_lun *rlun)
436 {
437         struct list_head *prio_list = &rlun->prio_list;
438         struct rrpc_block *rblock, *max;
439
440         BUG_ON(list_empty(prio_list));
441
442         max = list_first_entry(prio_list, struct rrpc_block, prio);
443         list_for_each_entry(rblock, prio_list, prio)
444                 max = rblock_max_invalid(max, rblock);
445
446         return max;
447 }
448
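/* Per-LUN GC worker: while the LUN is short on free blocks, pick the best
 * candidate from the priority list and queue it for block GC.
 */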
449 static void rrpc_lun_gc(struct work_struct *work)
450 {
451         struct rrpc_lun *rlun = container_of(work, struct rrpc_lun, ws_gc);
452         struct rrpc *rrpc = rlun->rrpc;
453         struct nvm_lun *lun = rlun->parent;
454         struct rrpc_block_gc *gcb;
455         unsigned int nr_blocks_need;
456
457         nr_blocks_need = rrpc->dev->blks_per_lun / GC_LIMIT_INVERSE;
458
459         if (nr_blocks_need < rrpc->nr_luns)
460                 nr_blocks_need = rrpc->nr_luns;
461
462         spin_lock(&rlun->lock);
463         while (nr_blocks_need > lun->nr_free_blocks &&
464                                         !list_empty(&rlun->prio_list)) {
465                 struct rrpc_block *rblock = block_prio_find_max(rlun);
466                 struct nvm_block *block = rblock->parent;
467
468                 if (!rblock->nr_invalid_pages)
469                         break;
470
471                 gcb = mempool_alloc(rrpc->gcb_pool, GFP_ATOMIC);
472                 if (!gcb)
473                         break;
474
475                 list_del_init(&rblock->prio);
476
477                 BUG_ON(!block_is_full(rrpc, rblock));
478
479                 pr_debug("rrpc: selected block '%lu' for GC\n", block->id);
480
481                 gcb->rrpc = rrpc;
482                 gcb->rblk = rblock;
483                 INIT_WORK(&gcb->ws_gc, rrpc_block_gc);
484
485                 queue_work(rrpc->kgc_wq, &gcb->ws_gc);
486
487                 nr_blocks_need--;
488         }
489         spin_unlock(&rlun->lock);
490
491         /* TODO: Hint that request queue can be started again */
492 }
493
494 static void rrpc_gc_queue(struct work_struct *work)
495 {
496         struct rrpc_block_gc *gcb = container_of(work, struct rrpc_block_gc,
497                                                                         ws_gc);
498         struct rrpc *rrpc = gcb->rrpc;
499         struct rrpc_block *rblk = gcb->rblk;
500         struct rrpc_lun *rlun = rblk->rlun;
501
502         spin_lock(&rlun->lock);
503         list_add_tail(&rblk->prio, &rlun->prio_list);
504         spin_unlock(&rlun->lock);
505
506         mempool_free(gcb, rrpc->gcb_pool);
507         pr_debug("nvm: block '%lu' is full, allow GC (sched)\n",
508                                                         rblk->parent->id);
509 }
510
511 static const struct block_device_operations rrpc_fops = {
512         .owner          = THIS_MODULE,
513 };
514
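/* Pick the LUN for the next write: plain round-robin for normal I/O, or the
 * LUN with the most free blocks when writing on behalf of GC.
 */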
515 static struct rrpc_lun *rrpc_get_lun_rr(struct rrpc *rrpc, int is_gc)
516 {
517         unsigned int i;
518         struct rrpc_lun *rlun, *max_free;
519
520         if (!is_gc)
521                 return get_next_lun(rrpc);
522
523         /* during GC we don't care about RR; instead we want to make
524          * sure that we maintain evenness between the block luns.
525          */
526         max_free = &rrpc->luns[0];
527         /* prevent a GC-ing lun from devouring pages of a lun with
528          * few free blocks. We don't take the lock as we only need an
529          * estimate.
530          */
531         rrpc_for_each_lun(rrpc, rlun, i) {
532                 if (rlun->parent->nr_free_blocks >
533                                         max_free->parent->nr_free_blocks)
534                         max_free = rlun;
535         }
536
537         return max_free;
538 }
539
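/* Point logical address @laddr at physical address @paddr in @rblk,
 * invalidating any previous mapping, and update the reverse map accordingly.
 */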
540 static struct rrpc_addr *rrpc_update_map(struct rrpc *rrpc, sector_t laddr,
541                                         struct rrpc_block *rblk, u64 paddr)
542 {
543         struct rrpc_addr *gp;
544         struct rrpc_rev_addr *rev;
545
546         BUG_ON(laddr >= rrpc->nr_sects);
547
548         gp = &rrpc->trans_map[laddr];
549         spin_lock(&rrpc->rev_lock);
550         if (gp->rblk)
551                 rrpc_page_invalidate(rrpc, gp);
552
553         gp->addr = paddr;
554         gp->rblk = rblk;
555
556         rev = &rrpc->rev_trans_map[gp->addr - rrpc->poffset];
557         rev->addr = laddr;
558         spin_unlock(&rrpc->rev_lock);
559
560         return gp;
561 }
562
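/* Reserve the next free sector in @rblk, or return ADDR_EMPTY if the block is
 * full.
 */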
563 static u64 rrpc_alloc_addr(struct rrpc *rrpc, struct rrpc_block *rblk)
564 {
565         u64 addr = ADDR_EMPTY;
566
567         spin_lock(&rblk->lock);
568         if (block_is_full(rrpc, rblk))
569                 goto out;
570
571         addr = block_to_addr(rrpc, rblk) + rblk->next_page;
572
573         rblk->next_page++;
574 out:
575         spin_unlock(&rblk->lock);
576         return addr;
577 }
578
579 /* Map a logical address to a physical page. The mapping implements a
580  * round-robin approach and allocates a page from the next available lun.
581  *
582  * Returns an rrpc_addr with the physical address and block. Returns NULL if no
583  * blocks in the next rlun are available.
584  */
585 static struct rrpc_addr *rrpc_map_page(struct rrpc *rrpc, sector_t laddr,
586                                                                 int is_gc)
587 {
588         struct rrpc_lun *rlun;
589         struct rrpc_block *rblk, **cur_rblk;
590         struct nvm_lun *lun;
591         u64 paddr;
592         int gc_force = 0;
593
594         rlun = rrpc_get_lun_rr(rrpc, is_gc);
595         lun = rlun->parent;
596
597         if (!is_gc && lun->nr_free_blocks < rrpc->nr_luns * 4)
598                 return NULL;
599
600         /*
601          * page allocation steps:
602          * 1. Try to allocate a new page from the current rblk
603          * 2a. If it succeeds, proceed to map it in and return
604          * 2b. If it fails, first try to allocate a new block from the media
605          *     manager, and then retry step 1. Retry until the normal block
606          *     pool is exhausted.
607          * 3. If exhausted and the garbage collector is requesting the block,
608          *    go to the reserved block and retry step 1.
609          *    If this fails as well, or the request does not come from GC,
610          *    report that no block could be retrieved and let the caller
611          *    handle further processing.
612          */
613
614         spin_lock(&rlun->lock);
615         cur_rblk = &rlun->cur;
616         rblk = rlun->cur;
617 retry:
618         paddr = rrpc_alloc_addr(rrpc, rblk);
619
620         if (paddr != ADDR_EMPTY)
621                 goto done;
622
623         if (!list_empty(&rlun->wblk_list)) {
624 new_blk:
625                 rblk = list_first_entry(&rlun->wblk_list, struct rrpc_block,
626                                                                         prio);
627                 rrpc_set_lun_cur(rlun, rblk, cur_rblk);
628                 list_del(&rblk->prio);
629                 goto retry;
630         }
631         spin_unlock(&rlun->lock);
632
633         rblk = rrpc_get_blk(rrpc, rlun, gc_force);
634         if (rblk) {
635                 spin_lock(&rlun->lock);
636                 list_add_tail(&rblk->prio, &rlun->wblk_list);
637                 /*
638                  * Another thread might already have added a new block;
639                  * therefore, make sure that one is used instead of the
640                  * one just added.
641                  */
642                 goto new_blk;
643         }
644
645         if (unlikely(is_gc) && !gc_force) {
646                 /* retry from emergency gc block */
647                 cur_rblk = &rlun->gc_cur;
648                 rblk = rlun->gc_cur;
649                 gc_force = 1;
650                 spin_lock(&rlun->lock);
651                 goto retry;
652         }
653
654         pr_err("rrpc: failed to allocate new block\n");
655         return NULL;
656 done:
657         spin_unlock(&rlun->lock);
658         return rrpc_update_map(rrpc, laddr, rblk, paddr);
659 }
660
661 static void rrpc_run_gc(struct rrpc *rrpc, struct rrpc_block *rblk)
662 {
663         struct rrpc_block_gc *gcb;
664
665         gcb = mempool_alloc(rrpc->gcb_pool, GFP_ATOMIC);
666         if (!gcb) {
667                 pr_err("rrpc: unable to queue block for gc.\n");
668                 return;
669         }
670
671         gcb->rrpc = rrpc;
672         gcb->rblk = rblk;
673
674         INIT_WORK(&gcb->ws_gc, rrpc_gc_queue);
675         queue_work(rrpc->kgc_wq, &gcb->ws_gc);
676 }
677
678 static void __rrpc_mark_bad_block(struct nvm_dev *dev, struct ppa_addr *ppa)
679 {
680         nvm_mark_blk(dev, *ppa, NVM_BLK_ST_BAD);
681         nvm_set_bb_tbl(dev, ppa, 1, NVM_BLK_T_GRWN_BAD);
682 }
683
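/* Mark every block that failed in @rqd as grown bad, both in the media
 * manager's state and in the device bad block table.
 */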
684 static void rrpc_mark_bad_block(struct rrpc *rrpc, struct nvm_rq *rqd)
685 {
686         struct nvm_dev *dev = rrpc->dev;
687         void *comp_bits = &rqd->ppa_status;
688         struct ppa_addr ppa, prev_ppa;
689         int nr_ppas = rqd->nr_ppas;
690         int bit;
691
692         if (rqd->nr_ppas == 1)
693                 __rrpc_mark_bad_block(dev, &rqd->ppa_addr);
694
695         ppa_set_empty(&prev_ppa);
696         bit = -1;
697         while ((bit = find_next_bit(comp_bits, nr_ppas, bit + 1)) < nr_ppas) {
698                 ppa = rqd->ppa_list[bit];
699                 if (ppa_cmp_blk(ppa, prev_ppa))
700                         continue;
701
702                 __rrpc_mark_bad_block(dev, &ppa);
703         }
704 }
705
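/* Account completed sectors per block; once a block has been fully written,
 * queue it for GC consideration.
 */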
706 static void rrpc_end_io_write(struct rrpc *rrpc, struct rrpc_rq *rrqd,
707                                                 sector_t laddr, uint8_t npages)
708 {
709         struct rrpc_addr *p;
710         struct rrpc_block *rblk;
711         struct nvm_lun *lun;
712         int cmnt_size, i;
713
714         for (i = 0; i < npages; i++) {
715                 p = &rrpc->trans_map[laddr + i];
716                 rblk = p->rblk;
717                 lun = rblk->parent->lun;
718
719                 cmnt_size = atomic_inc_return(&rblk->data_cmnt_size);
720                 if (unlikely(cmnt_size == rrpc->dev->sec_per_blk))
721                         rrpc_run_gc(rrpc, rblk);
722         }
723 }
724
725 static void rrpc_end_io(struct nvm_rq *rqd)
726 {
727         struct rrpc *rrpc = container_of(rqd->ins, struct rrpc, instance);
728         struct rrpc_rq *rrqd = nvm_rq_to_pdu(rqd);
729         uint8_t npages = rqd->nr_ppas;
730         sector_t laddr = rrpc_get_laddr(rqd->bio) - npages;
731
732         if (bio_data_dir(rqd->bio) == WRITE) {
733                 if (rqd->error == NVM_RSP_ERR_FAILWRITE)
734                         rrpc_mark_bad_block(rrpc, rqd);
735
736                 rrpc_end_io_write(rrpc, rrqd, laddr, npages);
737         }
738
739         bio_put(rqd->bio);
740
741         if (rrqd->flags & NVM_IOTYPE_GC)
742                 return;
743
744         rrpc_unlock_rq(rrpc, rqd);
745
746         if (npages > 1)
747                 nvm_dev_dma_free(rrpc->dev, rqd->ppa_list, rqd->dma_ppa_list);
748
749         mempool_free(rqd, rrpc->rq_pool);
750 }
751
752 static int rrpc_read_ppalist_rq(struct rrpc *rrpc, struct bio *bio,
753                         struct nvm_rq *rqd, unsigned long flags, int npages)
754 {
755         struct rrpc_inflight_rq *r = rrpc_get_inflight_rq(rqd);
756         struct rrpc_addr *gp;
757         sector_t laddr = rrpc_get_laddr(bio);
758         int is_gc = flags & NVM_IOTYPE_GC;
759         int i;
760
761         if (!is_gc && rrpc_lock_rq(rrpc, bio, rqd)) {
762                 nvm_dev_dma_free(rrpc->dev, rqd->ppa_list, rqd->dma_ppa_list);
763                 return NVM_IO_REQUEUE;
764         }
765
766         for (i = 0; i < npages; i++) {
767                 /* We assume that mapping occurs at 4KB granularity */
768                 BUG_ON(!(laddr + i >= 0 && laddr + i < rrpc->nr_sects));
769                 gp = &rrpc->trans_map[laddr + i];
770
771                 if (gp->rblk) {
772                         rqd->ppa_list[i] = rrpc_ppa_to_gaddr(rrpc->dev,
773                                                                 gp->addr);
774                 } else {
775                         BUG_ON(is_gc);
776                         rrpc_unlock_laddr(rrpc, r);
777                         nvm_dev_dma_free(rrpc->dev, rqd->ppa_list,
778                                                         rqd->dma_ppa_list);
779                         return NVM_IO_DONE;
780                 }
781         }
782
783         rqd->opcode = NVM_OP_HBREAD;
784
785         return NVM_IO_OK;
786 }
787
788 static int rrpc_read_rq(struct rrpc *rrpc, struct bio *bio, struct nvm_rq *rqd,
789                                                         unsigned long flags)
790 {
791         struct rrpc_rq *rrqd = nvm_rq_to_pdu(rqd);
792         int is_gc = flags & NVM_IOTYPE_GC;
793         sector_t laddr = rrpc_get_laddr(bio);
794         struct rrpc_addr *gp;
795
796         if (!is_gc && rrpc_lock_rq(rrpc, bio, rqd))
797                 return NVM_IO_REQUEUE;
798
799         BUG_ON(!(laddr >= 0 && laddr < rrpc->nr_sects));
800         gp = &rrpc->trans_map[laddr];
801
802         if (gp->rblk) {
803                 rqd->ppa_addr = rrpc_ppa_to_gaddr(rrpc->dev, gp->addr);
804         } else {
805                 BUG_ON(is_gc);
806                 rrpc_unlock_rq(rrpc, rqd);
807                 return NVM_IO_DONE;
808         }
809
810         rqd->opcode = NVM_OP_HBREAD;
811         rrqd->addr = gp;
812
813         return NVM_IO_OK;
814 }
815
816 static int rrpc_write_ppalist_rq(struct rrpc *rrpc, struct bio *bio,
817                         struct nvm_rq *rqd, unsigned long flags, int npages)
818 {
819         struct rrpc_inflight_rq *r = rrpc_get_inflight_rq(rqd);
820         struct rrpc_addr *p;
821         sector_t laddr = rrpc_get_laddr(bio);
822         int is_gc = flags & NVM_IOTYPE_GC;
823         int i;
824
825         if (!is_gc && rrpc_lock_rq(rrpc, bio, rqd)) {
826                 nvm_dev_dma_free(rrpc->dev, rqd->ppa_list, rqd->dma_ppa_list);
827                 return NVM_IO_REQUEUE;
828         }
829
830         for (i = 0; i < npages; i++) {
831                 /* We assume that mapping occurs at 4KB granularity */
832                 p = rrpc_map_page(rrpc, laddr + i, is_gc);
833                 if (!p) {
834                         BUG_ON(is_gc);
835                         rrpc_unlock_laddr(rrpc, r);
836                         nvm_dev_dma_free(rrpc->dev, rqd->ppa_list,
837                                                         rqd->dma_ppa_list);
838                         rrpc_gc_kick(rrpc);
839                         return NVM_IO_REQUEUE;
840                 }
841
842                 rqd->ppa_list[i] = rrpc_ppa_to_gaddr(rrpc->dev,
843                                                                 p->addr);
844         }
845
846         rqd->opcode = NVM_OP_HBWRITE;
847
848         return NVM_IO_OK;
849 }
850
851 static int rrpc_write_rq(struct rrpc *rrpc, struct bio *bio,
852                                 struct nvm_rq *rqd, unsigned long flags)
853 {
854         struct rrpc_rq *rrqd = nvm_rq_to_pdu(rqd);
855         struct rrpc_addr *p;
856         int is_gc = flags & NVM_IOTYPE_GC;
857         sector_t laddr = rrpc_get_laddr(bio);
858
859         if (!is_gc && rrpc_lock_rq(rrpc, bio, rqd))
860                 return NVM_IO_REQUEUE;
861
862         p = rrpc_map_page(rrpc, laddr, is_gc);
863         if (!p) {
864                 BUG_ON(is_gc);
865                 rrpc_unlock_rq(rrpc, rqd);
866                 rrpc_gc_kick(rrpc);
867                 return NVM_IO_REQUEUE;
868         }
869
870         rqd->ppa_addr = rrpc_ppa_to_gaddr(rrpc->dev, p->addr);
871         rqd->opcode = NVM_OP_HBWRITE;
872         rrqd->addr = p;
873
874         return NVM_IO_OK;
875 }
876
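/* Set up the physical addresses for a request: allocate a ppa list for
 * multi-page requests, then hand off to the read or write mapping path.
 */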
877 static int rrpc_setup_rq(struct rrpc *rrpc, struct bio *bio,
878                         struct nvm_rq *rqd, unsigned long flags, uint8_t npages)
879 {
880         if (npages > 1) {
881                 rqd->ppa_list = nvm_dev_dma_alloc(rrpc->dev, GFP_KERNEL,
882                                                         &rqd->dma_ppa_list);
883                 if (!rqd->ppa_list) {
884                         pr_err("rrpc: not able to allocate ppa list\n");
885                         return NVM_IO_ERR;
886                 }
887
888                 if (bio_op(bio) == REQ_OP_WRITE)
889                         return rrpc_write_ppalist_rq(rrpc, bio, rqd, flags,
890                                                                         npages);
891
892                 return rrpc_read_ppalist_rq(rrpc, bio, rqd, flags, npages);
893         }
894
895         if (bio_op(bio) == REQ_OP_WRITE)
896                 return rrpc_write_rq(rrpc, bio, rqd, flags);
897
898         return rrpc_read_rq(rrpc, bio, rqd, flags);
899 }
900
901 static int rrpc_submit_io(struct rrpc *rrpc, struct bio *bio,
902                                 struct nvm_rq *rqd, unsigned long flags)
903 {
904         int err;
905         struct rrpc_rq *rrq = nvm_rq_to_pdu(rqd);
906         uint8_t nr_pages = rrpc_get_pages(bio);
907         int bio_size = bio_sectors(bio) << 9;
908
909         if (bio_size < rrpc->dev->sec_size)
910                 return NVM_IO_ERR;
911         else if (bio_size > rrpc->dev->max_rq_size)
912                 return NVM_IO_ERR;
913
914         err = rrpc_setup_rq(rrpc, bio, rqd, flags, nr_pages);
915         if (err)
916                 return err;
917
918         bio_get(bio);
919         rqd->bio = bio;
920         rqd->ins = &rrpc->instance;
921         rqd->nr_ppas = nr_pages;
922         rrq->flags = flags;
923
924         err = nvm_submit_io(rrpc->dev, rqd);
925         if (err) {
926                 pr_err("rrpc: I/O submission failed: %d\n", err);
927                 bio_put(bio);
928                 if (!(flags & NVM_IOTYPE_GC)) {
929                         rrpc_unlock_rq(rrpc, rqd);
930                         if (rqd->nr_ppas > 1)
931                                 nvm_dev_dma_free(rrpc->dev, rqd->ppa_list,
932                                                  rqd->dma_ppa_list);
933                 }
934                 return NVM_IO_ERR;
935         }
936
937         return NVM_IO_OK;
938 }
939
940 static blk_qc_t rrpc_make_rq(struct request_queue *q, struct bio *bio)
941 {
942         struct rrpc *rrpc = q->queuedata;
943         struct nvm_rq *rqd;
944         int err;
945
946         if (bio_op(bio) == REQ_OP_DISCARD) {
947                 rrpc_discard(rrpc, bio);
948                 return BLK_QC_T_NONE;
949         }
950
951         rqd = mempool_alloc(rrpc->rq_pool, GFP_KERNEL);
952         if (!rqd) {
953                 pr_err_ratelimited("rrpc: not able to queue bio.\n");
954                 bio_io_error(bio);
955                 return BLK_QC_T_NONE;
956         }
957         memset(rqd, 0, sizeof(struct nvm_rq));
958
959         err = rrpc_submit_io(rrpc, bio, rqd, NVM_IOTYPE_NONE);
960         switch (err) {
961         case NVM_IO_OK:
962                 return BLK_QC_T_NONE;
963         case NVM_IO_ERR:
964                 bio_io_error(bio);
965                 break;
966         case NVM_IO_DONE:
967                 bio_endio(bio);
968                 break;
969         case NVM_IO_REQUEUE:
970                 spin_lock(&rrpc->bio_lock);
971                 bio_list_add(&rrpc->requeue_bios, bio);
972                 spin_unlock(&rrpc->bio_lock);
973                 queue_work(rrpc->kgc_wq, &rrpc->ws_requeue);
974                 break;
975         }
976
977         mempool_free(rqd, rrpc->rq_pool);
978         return BLK_QC_T_NONE;
979 }
980
981 static void rrpc_requeue(struct work_struct *work)
982 {
983         struct rrpc *rrpc = container_of(work, struct rrpc, ws_requeue);
984         struct bio_list bios;
985         struct bio *bio;
986
987         bio_list_init(&bios);
988
989         spin_lock(&rrpc->bio_lock);
990         bio_list_merge(&bios, &rrpc->requeue_bios);
991         bio_list_init(&rrpc->requeue_bios);
992         spin_unlock(&rrpc->bio_lock);
993
994         while ((bio = bio_list_pop(&bios)))
995                 rrpc_make_rq(rrpc->disk->queue, bio);
996 }
997
998 static void rrpc_gc_free(struct rrpc *rrpc)
999 {
1000         if (rrpc->krqd_wq)
1001                 destroy_workqueue(rrpc->krqd_wq);
1002
1003         if (rrpc->kgc_wq)
1004                 destroy_workqueue(rrpc->kgc_wq);
1005 }
1006
1007 static int rrpc_gc_init(struct rrpc *rrpc)
1008 {
1009         rrpc->krqd_wq = alloc_workqueue("rrpc-lun", WQ_MEM_RECLAIM|WQ_UNBOUND,
1010                                                                 rrpc->nr_luns);
1011         if (!rrpc->krqd_wq)
1012                 return -ENOMEM;
1013
1014         rrpc->kgc_wq = alloc_workqueue("rrpc-bg", WQ_MEM_RECLAIM, 1);
1015         if (!rrpc->kgc_wq)
1016                 return -ENOMEM;
1017
1018         setup_timer(&rrpc->gc_timer, rrpc_gc_timer, (unsigned long)rrpc);
1019
1020         return 0;
1021 }
1022
1023 static void rrpc_map_free(struct rrpc *rrpc)
1024 {
1025         vfree(rrpc->rev_trans_map);
1026         vfree(rrpc->trans_map);
1027 }
1028
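/* Callback for get_l2p_tbl: seed the forward and reverse translation maps from
 * the L2P entries returned by the device.
 */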
1029 static int rrpc_l2p_update(u64 slba, u32 nlb, __le64 *entries, void *private)
1030 {
1031         struct rrpc *rrpc = (struct rrpc *)private;
1032         struct nvm_dev *dev = rrpc->dev;
1033         struct rrpc_addr *addr = rrpc->trans_map + slba;
1034         struct rrpc_rev_addr *raddr = rrpc->rev_trans_map;
1035         u64 elba = slba + nlb;
1036         u64 i;
1037
1038         if (unlikely(elba > dev->total_secs)) {
1039                 pr_err("nvm: L2P data from device is out of bounds!\n");
1040                 return -EINVAL;
1041         }
1042
1043         for (i = 0; i < nlb; i++) {
1044                 u64 pba = le64_to_cpu(entries[i]);
1045                 unsigned int mod;
1046                 /* LNVM treats address spaces as silos: LBA and PBA are
1047                  * equally large and zero-indexed.
1048                  */
1049                 if (unlikely(pba >= dev->total_secs && pba != U64_MAX)) {
1050                         pr_err("nvm: L2P data entry is out of bounds!\n");
1051                         return -EINVAL;
1052                 }
1053
1054                 /* Address zero is special: the first page on a disk is
1055                  * protected, as it often holds internal device boot
1056                  * information.
1057                  */
1058                 if (!pba)
1059                         continue;
1060
1061                 div_u64_rem(pba, rrpc->nr_sects, &mod);
1062
1063                 addr[i].addr = pba;
1064                 raddr[mod].addr = slba + i;
1065         }
1066
1067         return 0;
1068 }
1069
1070 static int rrpc_map_init(struct rrpc *rrpc)
1071 {
1072         struct nvm_dev *dev = rrpc->dev;
1073         sector_t i;
1074         int ret;
1075
1076         rrpc->trans_map = vzalloc(sizeof(struct rrpc_addr) * rrpc->nr_sects);
1077         if (!rrpc->trans_map)
1078                 return -ENOMEM;
1079
1080         rrpc->rev_trans_map = vmalloc(sizeof(struct rrpc_rev_addr)
1081                                                         * rrpc->nr_sects);
1082         if (!rrpc->rev_trans_map)
1083                 return -ENOMEM;
1084
1085         for (i = 0; i < rrpc->nr_sects; i++) {
1086                 struct rrpc_addr *p = &rrpc->trans_map[i];
1087                 struct rrpc_rev_addr *r = &rrpc->rev_trans_map[i];
1088
1089                 p->addr = ADDR_EMPTY;
1090                 r->addr = ADDR_EMPTY;
1091         }
1092
1093         if (!dev->ops->get_l2p_tbl)
1094                 return 0;
1095
1096         /* Bring up the mapping table from device */
1097         ret = dev->ops->get_l2p_tbl(dev, rrpc->soffset, rrpc->nr_sects,
1098                                         rrpc_l2p_update, rrpc);
1099         if (ret) {
1100                 pr_err("nvm: rrpc: could not read L2P table.\n");
1101                 return -EINVAL;
1102         }
1103
1104         return 0;
1105 }
1106
1107 /* Minimum pages needed within a lun */
1108 #define PAGE_POOL_SIZE 16
1109 #define ADDR_POOL_SIZE 64
1110
1111 static int rrpc_core_init(struct rrpc *rrpc)
1112 {
1113         down_write(&rrpc_lock);
1114         if (!rrpc_gcb_cache) {
1115                 rrpc_gcb_cache = kmem_cache_create("rrpc_gcb",
1116                                 sizeof(struct rrpc_block_gc), 0, 0, NULL);
1117                 if (!rrpc_gcb_cache) {
1118                         up_write(&rrpc_lock);
1119                         return -ENOMEM;
1120                 }
1121
1122                 rrpc_rq_cache = kmem_cache_create("rrpc_rq",
1123                                 sizeof(struct nvm_rq) + sizeof(struct rrpc_rq),
1124                                 0, 0, NULL);
1125                 if (!rrpc_rq_cache) {
1126                         kmem_cache_destroy(rrpc_gcb_cache);
1127                         up_write(&rrpc_lock);
1128                         return -ENOMEM;
1129                 }
1130         }
1131         up_write(&rrpc_lock);
1132
1133         rrpc->page_pool = mempool_create_page_pool(PAGE_POOL_SIZE, 0);
1134         if (!rrpc->page_pool)
1135                 return -ENOMEM;
1136
1137         rrpc->gcb_pool = mempool_create_slab_pool(rrpc->dev->nr_luns,
1138                                                                 rrpc_gcb_cache);
1139         if (!rrpc->gcb_pool)
1140                 return -ENOMEM;
1141
1142         rrpc->rq_pool = mempool_create_slab_pool(64, rrpc_rq_cache);
1143         if (!rrpc->rq_pool)
1144                 return -ENOMEM;
1145
1146         spin_lock_init(&rrpc->inflights.lock);
1147         INIT_LIST_HEAD(&rrpc->inflights.reqs);
1148
1149         return 0;
1150 }
1151
1152 static void rrpc_core_free(struct rrpc *rrpc)
1153 {
1154         mempool_destroy(rrpc->page_pool);
1155         mempool_destroy(rrpc->gcb_pool);
1156         mempool_destroy(rrpc->rq_pool);
1157 }
1158
1159 static void rrpc_luns_free(struct rrpc *rrpc)
1160 {
1161         struct nvm_dev *dev = rrpc->dev;
1162         struct nvm_lun *lun;
1163         struct rrpc_lun *rlun;
1164         int i;
1165
1166         if (!rrpc->luns)
1167                 return;
1168
1169         for (i = 0; i < rrpc->nr_luns; i++) {
1170                 rlun = &rrpc->luns[i];
1171                 lun = rlun->parent;
1172                 if (!lun)
1173                         break;
1174                 dev->mt->release_lun(dev, lun->id);
1175                 vfree(rlun->blocks);
1176         }
1177
1178         kfree(rrpc->luns);
1179 }
1180
1181 static int rrpc_luns_init(struct rrpc *rrpc, int lun_begin, int lun_end)
1182 {
1183         struct nvm_dev *dev = rrpc->dev;
1184         struct rrpc_lun *rlun;
1185         int i, j, ret = -EINVAL;
1186
1187         if (dev->sec_per_blk > MAX_INVALID_PAGES_STORAGE * BITS_PER_LONG) {
1188                 pr_err("rrpc: number of pages per block too high.\n");
1189                 return -EINVAL;
1190         }
1191
1192         spin_lock_init(&rrpc->rev_lock);
1193
1194         rrpc->luns = kcalloc(rrpc->nr_luns, sizeof(struct rrpc_lun),
1195                                                                 GFP_KERNEL);
1196         if (!rrpc->luns)
1197                 return -ENOMEM;
1198
1199         /* 1:1 mapping */
1200         for (i = 0; i < rrpc->nr_luns; i++) {
1201                 int lunid = lun_begin + i;
1202                 struct nvm_lun *lun;
1203
1204                 if (dev->mt->reserve_lun(dev, lunid)) {
1205                         pr_err("rrpc: lun %u is already allocated\n", lunid);
1206                         goto err;
1207                 }
1208
1209                 lun = dev->mt->get_lun(dev, lunid);
1210                 if (!lun)
1211                         goto err;
1212
1213                 rlun = &rrpc->luns[i];
1214                 rlun->parent = lun;
1215                 rlun->blocks = vzalloc(sizeof(struct rrpc_block) *
1216                                                 rrpc->dev->blks_per_lun);
1217                 if (!rlun->blocks) {
1218                         ret = -ENOMEM;
1219                         goto err;
1220                 }
1221
1222                 for (j = 0; j < rrpc->dev->blks_per_lun; j++) {
1223                         struct rrpc_block *rblk = &rlun->blocks[j];
1224                         struct nvm_block *blk = &lun->blocks[j];
1225
1226                         rblk->parent = blk;
1227                         rblk->rlun = rlun;
1228                         INIT_LIST_HEAD(&rblk->prio);
1229                         spin_lock_init(&rblk->lock);
1230                 }
1231
1232                 rlun->rrpc = rrpc;
1233                 INIT_LIST_HEAD(&rlun->prio_list);
1234                 INIT_LIST_HEAD(&rlun->wblk_list);
1235
1236                 INIT_WORK(&rlun->ws_gc, rrpc_lun_gc);
1237                 spin_lock_init(&rlun->lock);
1238         }
1239
1240         return 0;
1241 err:
1242         return ret;
1243 }
1244
1245 /* returns 0 on success and stores the beginning address in *begin */
1246 static int rrpc_area_init(struct rrpc *rrpc, sector_t *begin)
1247 {
1248         struct nvm_dev *dev = rrpc->dev;
1249         struct nvmm_type *mt = dev->mt;
1250         sector_t size = rrpc->nr_sects * dev->sec_size;
1251         int ret;
1252
1253         size >>= 9;
1254
1255         ret = mt->get_area(dev, begin, size);
1256         if (!ret)
1257                 *begin >>= (ilog2(dev->sec_size) - 9);
1258
1259         return ret;
1260 }
1261
1262 static void rrpc_area_free(struct rrpc *rrpc)
1263 {
1264         struct nvm_dev *dev = rrpc->dev;
1265         struct nvmm_type *mt = dev->mt;
1266         sector_t begin = rrpc->soffset << (ilog2(dev->sec_size) - 9);
1267
1268         mt->put_area(dev, begin);
1269 }
1270
1271 static void rrpc_free(struct rrpc *rrpc)
1272 {
1273         rrpc_gc_free(rrpc);
1274         rrpc_map_free(rrpc);
1275         rrpc_core_free(rrpc);
1276         rrpc_luns_free(rrpc);
1277         rrpc_area_free(rrpc);
1278
1279         kfree(rrpc);
1280 }
1281
1282 static void rrpc_exit(void *private)
1283 {
1284         struct rrpc *rrpc = private;
1285
1286         del_timer(&rrpc->gc_timer);
1287
1288         flush_workqueue(rrpc->krqd_wq);
1289         flush_workqueue(rrpc->kgc_wq);
1290
1291         rrpc_free(rrpc);
1292 }
1293
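/* Exposed capacity in 512-byte sectors: total sectors minus the per-LUN
 * reserved blocks, with a further 10% held back for over-provisioning.
 */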
1294 static sector_t rrpc_capacity(void *private)
1295 {
1296         struct rrpc *rrpc = private;
1297         struct nvm_dev *dev = rrpc->dev;
1298         sector_t reserved, provisioned;
1299
1300         /* cur, gc, and two emergency blocks for each lun */
1301         reserved = rrpc->nr_luns * dev->sec_per_blk * 4;
1302         provisioned = rrpc->nr_sects - reserved;
1303
1304         if (reserved > rrpc->nr_sects) {
1305                 pr_err("rrpc: not enough space available to expose storage.\n");
1306                 return 0;
1307         }
1308
1309         sector_div(provisioned, 10);
1310         return provisioned * 9 * NR_PHY_IN_LOG;
1311 }
1312
1313 /*
1314  * Looks up the logical address from the reverse trans map and checks whether
1315  * it is still valid by comparing the logical-to-physical entry with the
1316  * physical address. Stale sectors are marked invalid in the block's bitmap.
1317  */
1318 static void rrpc_block_map_update(struct rrpc *rrpc, struct rrpc_block *rblk)
1319 {
1320         struct nvm_dev *dev = rrpc->dev;
1321         int offset;
1322         struct rrpc_addr *laddr;
1323         u64 bpaddr, paddr, pladdr;
1324
1325         bpaddr = block_to_rel_addr(rrpc, rblk);
1326         for (offset = 0; offset < dev->sec_per_blk; offset++) {
1327                 paddr = bpaddr + offset;
1328
1329                 pladdr = rrpc->rev_trans_map[paddr].addr;
1330                 if (pladdr == ADDR_EMPTY)
1331                         continue;
1332
1333                 laddr = &rrpc->trans_map[pladdr];
1334
1335                 if (paddr == laddr->addr) {
1336                         laddr->rblk = rblk;
1337                 } else {
1338                         set_bit(offset, rblk->invalid_pages);
1339                         rblk->nr_invalid_pages++;
1340                 }
1341         }
1342 }
1343
1344 static int rrpc_blocks_init(struct rrpc *rrpc)
1345 {
1346         struct rrpc_lun *rlun;
1347         struct rrpc_block *rblk;
1348         int lun_iter, blk_iter;
1349
1350         for (lun_iter = 0; lun_iter < rrpc->nr_luns; lun_iter++) {
1351                 rlun = &rrpc->luns[lun_iter];
1352
1353                 for (blk_iter = 0; blk_iter < rrpc->dev->blks_per_lun;
1354                                                                 blk_iter++) {
1355                         rblk = &rlun->blocks[blk_iter];
1356                         rrpc_block_map_update(rrpc, rblk);
1357                 }
1358         }
1359
1360         return 0;
1361 }
1362
1363 static int rrpc_luns_configure(struct rrpc *rrpc)
1364 {
1365         struct rrpc_lun *rlun;
1366         struct rrpc_block *rblk;
1367         int i;
1368
1369         for (i = 0; i < rrpc->nr_luns; i++) {
1370                 rlun = &rrpc->luns[i];
1371
1372                 rblk = rrpc_get_blk(rrpc, rlun, 0);
1373                 if (!rblk)
1374                         goto err;
1375                 rrpc_set_lun_cur(rlun, rblk, &rlun->cur);
1376
1377                 /* Emergency gc block */
1378                 rblk = rrpc_get_blk(rrpc, rlun, 1);
1379                 if (!rblk)
1380                         goto err;
1381                 rrpc_set_lun_cur(rlun, rblk, &rlun->gc_cur);
1382         }
1383
1384         return 0;
1385 err:
1386         rrpc_put_blks(rrpc);
1387         return -EINVAL;
1388 }
1389
1390 static struct nvm_tgt_type tt_rrpc;
1391
1392 static void *rrpc_init(struct nvm_dev *dev, struct gendisk *tdisk,
1393                                                 int lun_begin, int lun_end)
1394 {
1395         struct request_queue *bqueue = dev->q;
1396         struct request_queue *tqueue = tdisk->queue;
1397         struct rrpc *rrpc;
1398         sector_t soffset;
1399         int ret;
1400
1401         if (!(dev->identity.dom & NVM_RSP_L2P)) {
1402                 pr_err("nvm: rrpc: device does not support l2p (%x)\n",
1403                                                         dev->identity.dom);
1404                 return ERR_PTR(-EINVAL);
1405         }
1406
1407         rrpc = kzalloc(sizeof(struct rrpc), GFP_KERNEL);
1408         if (!rrpc)
1409                 return ERR_PTR(-ENOMEM);
1410
1411         rrpc->instance.tt = &tt_rrpc;
1412         rrpc->dev = dev;
1413         rrpc->disk = tdisk;
1414
1415         bio_list_init(&rrpc->requeue_bios);
1416         spin_lock_init(&rrpc->bio_lock);
1417         INIT_WORK(&rrpc->ws_requeue, rrpc_requeue);
1418
1419         rrpc->nr_luns = lun_end - lun_begin + 1;
1420         rrpc->total_blocks = (unsigned long)dev->blks_per_lun * rrpc->nr_luns;
1421         rrpc->nr_sects = (unsigned long long)dev->sec_per_lun * rrpc->nr_luns;
1422
1423         /* simple round-robin strategy */
1424         atomic_set(&rrpc->next_lun, -1);
1425
1426         ret = rrpc_area_init(rrpc, &soffset);
1427         if (ret < 0) {
1428                 pr_err("nvm: rrpc: could not initialize area\n");
1429                 return ERR_PTR(ret);
1430         }
1431         rrpc->soffset = soffset;
1432
1433         ret = rrpc_luns_init(rrpc, lun_begin, lun_end);
1434         if (ret) {
1435                 pr_err("nvm: rrpc: could not initialize luns\n");
1436                 goto err;
1437         }
1438
1439         rrpc->poffset = dev->sec_per_lun * lun_begin;
1440         rrpc->lun_offset = lun_begin;
1441
1442         ret = rrpc_core_init(rrpc);
1443         if (ret) {
1444                 pr_err("nvm: rrpc: could not initialize core\n");
1445                 goto err;
1446         }
1447
1448         ret = rrpc_map_init(rrpc);
1449         if (ret) {
1450                 pr_err("nvm: rrpc: could not initialize maps\n");
1451                 goto err;
1452         }
1453
1454         ret = rrpc_blocks_init(rrpc);
1455         if (ret) {
1456                 pr_err("nvm: rrpc: could not initialize state for blocks\n");
1457                 goto err;
1458         }
1459
1460         ret = rrpc_luns_configure(rrpc);
1461         if (ret) {
1462                 pr_err("nvm: rrpc: not enough blocks available in LUNs.\n");
1463                 goto err;
1464         }
1465
1466         ret = rrpc_gc_init(rrpc);
1467         if (ret) {
1468                 pr_err("nvm: rrpc: could not initialize gc\n");
1469                 goto err;
1470         }
1471
1472         /* inherit the size from the underlying device */
1473         blk_queue_logical_block_size(tqueue, queue_physical_block_size(bqueue));
1474         blk_queue_max_hw_sectors(tqueue, queue_max_hw_sectors(bqueue));
1475
1476         pr_info("nvm: rrpc initialized with %u luns and %llu pages.\n",
1477                         rrpc->nr_luns, (unsigned long long)rrpc->nr_sects);
1478
1479         mod_timer(&rrpc->gc_timer, jiffies + msecs_to_jiffies(10));
1480
1481         return rrpc;
1482 err:
1483         rrpc_free(rrpc);
1484         return ERR_PTR(ret);
1485 }
1486
1487 /* round robin, page-based FTL, and cost-based GC */
1488 static struct nvm_tgt_type tt_rrpc = {
1489         .name           = "rrpc",
1490         .version        = {1, 0, 0},
1491
1492         .make_rq        = rrpc_make_rq,
1493         .capacity       = rrpc_capacity,
1494         .end_io         = rrpc_end_io,
1495
1496         .init           = rrpc_init,
1497         .exit           = rrpc_exit,
1498 };
1499
1500 static int __init rrpc_module_init(void)
1501 {
1502         return nvm_register_tgt_type(&tt_rrpc);
1503 }
1504
1505 static void rrpc_module_exit(void)
1506 {
1507         nvm_unregister_tgt_type(&tt_rrpc);
1508 }
1509
1510 module_init(rrpc_module_init);
1511 module_exit(rrpc_module_exit);
1512 MODULE_LICENSE("GPL v2");
1513 MODULE_DESCRIPTION("Block-Device Target for Open-Channel SSDs");