/*
 * Copyright (C) 2016 CNEX Labs
 * Initial release: Javier Gonzalez <javier@cnexlabs.com>
 *                  Matias Bjorling <matias@cnexlabs.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * pblk-core.c - pblk's core functionality
 *
 */
#include <linux/time.h>

#include "pblk.h"
static void pblk_mark_bb(struct pblk *pblk, struct pblk_line *line,
                         struct ppa_addr *ppa)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        int pos = pblk_dev_ppa_to_pos(geo, *ppa);

        pr_debug("pblk: erase failed: line:%d, pos:%d\n", line->id, pos);
        atomic_long_inc(&pblk->erase_failed);

        if (test_and_set_bit(pos, line->blk_bitmap))
                pr_err("pblk: attempted to erase bb: line:%d, pos:%d\n",
                       line->id, pos);

        pblk_line_run_ws(pblk, NULL, ppa, pblk_line_mark_bb);
}
static void __pblk_end_io_erase(struct pblk *pblk, struct nvm_rq *rqd)
{
        struct pblk_line *line;

        line = &pblk->lines[pblk_dev_ppa_to_line(rqd->ppa_addr)];
        atomic_dec(&line->left_seblks);

        if (rqd->error) {
                struct ppa_addr *ppa;

                ppa = kmalloc(sizeof(struct ppa_addr), GFP_ATOMIC);
                if (!ppa)
                        return;

                *ppa = rqd->ppa_addr;
                pblk_mark_bb(pblk, line, ppa);
        }
}
/* Erase completion assumes that only one block is erased at a time */
static void pblk_end_io_erase(struct nvm_rq *rqd)
{
        struct pblk *pblk = rqd->private;

        __pblk_end_io_erase(pblk, rqd);
        mempool_free(rqd, pblk->r_rq_pool);
}
static void __pblk_map_invalidate(struct pblk *pblk, struct pblk_line *line,
                                  u64 paddr)
{
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        struct list_head *move_list = NULL;

        /* Lines being reclaimed (GC'ed) cannot be invalidated. Before the L2P
         * table is modified with reclaimed sectors, a check is done to ensure
         * that newer updates are not overwritten.
         */
        spin_lock(&line->lock);
        if (line->state == PBLK_LINESTATE_GC ||
            line->state == PBLK_LINESTATE_FREE) {
                spin_unlock(&line->lock);
                return;
        }

        if (test_and_set_bit(paddr, line->invalid_bitmap)) {
                WARN_ONCE(1, "pblk: double invalidate\n");
                spin_unlock(&line->lock);
                return;
        }
        line->vsc--;

        if (line->state == PBLK_LINESTATE_CLOSED)
                move_list = pblk_line_gc_list(pblk, line);
        spin_unlock(&line->lock);

        if (move_list) {
                spin_lock(&l_mg->gc_lock);
                spin_lock(&line->lock);
                /* Prevent moving a line that has just been chosen for GC */
                if (line->state == PBLK_LINESTATE_GC ||
                    line->state == PBLK_LINESTATE_FREE) {
                        spin_unlock(&line->lock);
                        spin_unlock(&l_mg->gc_lock);
                        return;
                }
                spin_unlock(&line->lock);

                list_move_tail(&line->list, move_list);
                spin_unlock(&l_mg->gc_lock);
        }
}
void pblk_map_invalidate(struct pblk *pblk, struct ppa_addr ppa)
{
        struct pblk_line *line;
        u64 paddr;
        int line_id;

#ifdef CONFIG_NVM_DEBUG
        /* Callers must ensure that the ppa points to a device address */
        BUG_ON(pblk_addr_in_cache(ppa));
        BUG_ON(pblk_ppa_empty(ppa));
#endif

        line_id = pblk_tgt_ppa_to_line(ppa);
        line = &pblk->lines[line_id];
        paddr = pblk_dev_ppa_to_line_addr(pblk, ppa);

        __pblk_map_invalidate(pblk, line, paddr);
}
void pblk_map_pad_invalidate(struct pblk *pblk, struct pblk_line *line,
                             u64 paddr)
{
        __pblk_map_invalidate(pblk, line, paddr);

        pblk_rb_sync_init(&pblk->rwb, NULL);
        line->left_ssecs--;
        if (!line->left_ssecs)
                pblk_line_run_ws(pblk, line, NULL, pblk_line_close_ws);
        pblk_rb_sync_end(&pblk->rwb, NULL);
}
static void pblk_invalidate_range(struct pblk *pblk, sector_t slba,
                                  unsigned int nr_secs)
{
        sector_t lba;

        spin_lock(&pblk->trans_lock);
        for (lba = slba; lba < slba + nr_secs; lba++) {
                struct ppa_addr ppa;

                ppa = pblk_trans_map_get(pblk, lba);

                if (!pblk_addr_in_cache(ppa) && !pblk_ppa_empty(ppa))
                        pblk_map_invalidate(pblk, ppa);

                pblk_ppa_set_empty(&ppa);
                pblk_trans_map_set(pblk, lba, ppa);
        }
        spin_unlock(&pblk->trans_lock);
}
struct nvm_rq *pblk_alloc_rqd(struct pblk *pblk, int rw)
{
        mempool_t *pool;
        struct nvm_rq *rqd;
        int rq_size;

        if (rw == WRITE) {
                pool = pblk->w_rq_pool;
                rq_size = pblk_w_rq_size;
        } else {
                pool = pblk->r_rq_pool;
                rq_size = pblk_r_rq_size;
        }

        rqd = mempool_alloc(pool, GFP_KERNEL);
        memset(rqd, 0, rq_size);

        return rqd;
}
void pblk_free_rqd(struct pblk *pblk, struct nvm_rq *rqd, int rw)
{
        mempool_t *pool;

        if (rw == WRITE)
                pool = pblk->w_rq_pool;
        else
                pool = pblk->r_rq_pool;

        mempool_free(rqd, pool);
}
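
/*
 * Usage sketch (illustrative only, not part of the original driver):
 * callers pair pblk_alloc_rqd() with pblk_free_rqd() using the same rw
 * direction, so the request returns to the pool it was taken from. The
 * rqd comes back zeroed, so only request-specific fields need setting:
 *
 *	struct nvm_rq *rqd = pblk_alloc_rqd(pblk, WRITE);
 *
 *	... set up rqd->opcode, rqd->bio, rqd->ppa_list, rqd->end_io ...
 *	ret = pblk_submit_io(pblk, rqd);
 *	...
 *	pblk_free_rqd(pblk, rqd, WRITE);
 */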
void pblk_bio_free_pages(struct pblk *pblk, struct bio *bio, int off,
                         int nr_pages)
{
        struct bio_vec bv;
        int i;

        WARN_ON(off + nr_pages != bio->bi_vcnt);

        bio_advance(bio, off * PBLK_EXPOSED_PAGE_SIZE);
        for (i = off; i < nr_pages + off; i++) {
                bv = bio->bi_io_vec[i];
                mempool_free(bv.bv_page, pblk->page_pool);
        }
}
int pblk_bio_add_pages(struct pblk *pblk, struct bio *bio, gfp_t flags,
                       int nr_pages)
{
        struct request_queue *q = pblk->dev->q;
        struct page *page;
        int i, ret;

        for (i = 0; i < nr_pages; i++) {
                page = mempool_alloc(pblk->page_pool, flags);
                if (!page)
                        goto err;

                ret = bio_add_pc_page(q, bio, page, PBLK_EXPOSED_PAGE_SIZE, 0);
                if (ret != PBLK_EXPOSED_PAGE_SIZE) {
                        pr_err("pblk: could not add page to bio\n");
                        mempool_free(page, pblk->page_pool);
                        goto err;
                }
        }

        return 0;
err:
        pblk_bio_free_pages(pblk, bio, 0, i - 1);
        return -1;
}
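
/*
 * Usage sketch (illustrative only): on a partial failure
 * pblk_bio_add_pages() returns the pages it already added back to the
 * pool itself, so a caller only needs to free pages after a successful
 * call:
 *
 *	if (pblk_bio_add_pages(pblk, bio, GFP_KERNEL, nr_secs))
 *		return NVM_IO_ERR;
 *	... I/O completes ...
 *	pblk_bio_free_pages(pblk, bio, 0, nr_secs);
 */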
static void pblk_write_kick(struct pblk *pblk)
{
        wake_up_process(pblk->writer_ts);
        mod_timer(&pblk->wtimer, jiffies + msecs_to_jiffies(1000));
}
void pblk_write_timer_fn(unsigned long data)
{
        struct pblk *pblk = (struct pblk *)data;

        /* kick the write thread every tick to flush outstanding data */
        pblk_write_kick(pblk);
}
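
/*
 * A sketch of how the timer is assumed to be wired up at init time (the
 * actual binding lives in pblk's init path, not in this file):
 *
 *	setup_timer(&pblk->wtimer, pblk_write_timer_fn, (unsigned long)pblk);
 *	mod_timer(&pblk->wtimer, jiffies + msecs_to_jiffies(100));
 *
 * From then on pblk_write_kick() re-arms the timer on every kick, so the
 * writer thread is woken at least once a second even without new I/O.
 */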
void pblk_write_should_kick(struct pblk *pblk)
{
        unsigned int secs_avail = pblk_rb_read_count(&pblk->rwb);

        if (secs_avail >= pblk->min_write_pgs)
                pblk_write_kick(pblk);
}
void pblk_end_bio_sync(struct bio *bio)
{
        struct completion *waiting = bio->bi_private;

        complete(waiting);
}

void pblk_end_io_sync(struct nvm_rq *rqd)
{
        struct completion *waiting = rqd->private;

        complete(waiting);
}
void pblk_flush_writer(struct pblk *pblk)
{
        struct bio *bio;
        int ret;
        DECLARE_COMPLETION_ONSTACK(wait);

        bio = bio_alloc(GFP_KERNEL, 1);
        if (!bio)
                return;

        bio->bi_iter.bi_sector = 0; /* internal bio */
        bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_OP_FLUSH);
        bio->bi_private = &wait;
        bio->bi_end_io = pblk_end_bio_sync;

        ret = pblk_write_to_cache(pblk, bio, 0);
        if (ret == NVM_IO_OK) {
                if (!wait_for_completion_io_timeout(&wait,
                                msecs_to_jiffies(PBLK_COMMAND_TIMEOUT_MS)))
                        pr_err("pblk: flush cache timed out\n");
        } else if (ret != NVM_IO_DONE) {
                pr_err("pblk: tear down bio failed\n");
        }

        if (bio->bi_error)
                pr_err("pblk: flush sync write failed (%u)\n", bio->bi_error);

        bio_put(bio);
}
struct list_head *pblk_line_gc_list(struct pblk *pblk, struct pblk_line *line)
{
        struct pblk_line_meta *lm = &pblk->lm;
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        struct list_head *move_list = NULL;

        if (!line->vsc) {
                if (line->gc_group != PBLK_LINEGC_FULL) {
                        line->gc_group = PBLK_LINEGC_FULL;
                        move_list = &l_mg->gc_full_list;
                }
        } else if (line->vsc < lm->mid_thrs) {
                if (line->gc_group != PBLK_LINEGC_HIGH) {
                        line->gc_group = PBLK_LINEGC_HIGH;
                        move_list = &l_mg->gc_high_list;
                }
        } else if (line->vsc < lm->high_thrs) {
                if (line->gc_group != PBLK_LINEGC_MID) {
                        line->gc_group = PBLK_LINEGC_MID;
                        move_list = &l_mg->gc_mid_list;
                }
        } else if (line->vsc < line->sec_in_line) {
                if (line->gc_group != PBLK_LINEGC_LOW) {
                        line->gc_group = PBLK_LINEGC_LOW;
                        move_list = &l_mg->gc_low_list;
                }
        } else if (line->vsc == line->sec_in_line) {
                if (line->gc_group != PBLK_LINEGC_EMPTY) {
                        line->gc_group = PBLK_LINEGC_EMPTY;
                        move_list = &l_mg->gc_empty_list;
                }
        } else {
                line->state = PBLK_LINESTATE_CORRUPT;
                line->gc_group = PBLK_LINEGC_NONE;
                move_list = &l_mg->corrupt_list;
                pr_err("pblk: corrupted vsc for line %d, vsc:%d (%d/%d/%d)\n",
                       line->id, line->vsc, line->sec_in_line,
                       lm->high_thrs, lm->mid_thrs);
        }

        return move_list;
}
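
/*
 * Worked example (hypothetical numbers): with sec_in_line = 4096,
 * mid_thrs = 1024 and high_thrs = 2048, a line with vsc = 0 moves to
 * gc_full_list (nothing valid, cheapest to reclaim), vsc = 512 to
 * gc_high_list, vsc = 1536 to gc_mid_list, vsc = 3000 to gc_low_list,
 * and vsc = 4096 (everything still valid) to gc_empty_list. Any vsc
 * above sec_in_line is impossible and lands the line in corrupt_list.
 */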
void pblk_discard(struct pblk *pblk, struct bio *bio)
{
        sector_t slba = pblk_get_lba(bio);
        sector_t nr_secs = pblk_get_secs(bio);

        pblk_invalidate_range(pblk, slba, nr_secs);
}
struct ppa_addr pblk_get_lba_map(struct pblk *pblk, sector_t lba)
{
        struct ppa_addr ppa;

        spin_lock(&pblk->trans_lock);
        ppa = pblk_trans_map_get(pblk, lba);
        spin_unlock(&pblk->trans_lock);

        return ppa;
}
void pblk_log_write_err(struct pblk *pblk, struct nvm_rq *rqd)
{
        atomic_long_inc(&pblk->write_failed);
#ifdef CONFIG_NVM_DEBUG
        pblk_print_failed_rqd(pblk, rqd, rqd->error);
#endif
}
void pblk_log_read_err(struct pblk *pblk, struct nvm_rq *rqd)
{
        /* Empty page read is not necessarily an error (e.g., L2P recovery) */
        if (rqd->error == NVM_RSP_ERR_EMPTYPAGE) {
                atomic_long_inc(&pblk->read_empty);
                return;
        }

        switch (rqd->error) {
        case NVM_RSP_WARN_HIGHECC:
                atomic_long_inc(&pblk->read_high_ecc);
                break;
        case NVM_RSP_ERR_FAILECC:
        case NVM_RSP_ERR_FAILCRC:
                atomic_long_inc(&pblk->read_failed);
                break;
        default:
                pr_err("pblk: unknown read error:%d\n", rqd->error);
        }
#ifdef CONFIG_NVM_DEBUG
        pblk_print_failed_rqd(pblk, rqd, rqd->error);
#endif
}
int pblk_submit_io(struct pblk *pblk, struct nvm_rq *rqd)
{
        struct nvm_tgt_dev *dev = pblk->dev;

#ifdef CONFIG_NVM_DEBUG
        struct ppa_addr *ppa_list;

        ppa_list = (rqd->nr_ppas > 1) ? rqd->ppa_list : &rqd->ppa_addr;
        if (pblk_boundary_ppa_checks(dev, ppa_list, rqd->nr_ppas)) {
                WARN_ON(1);
                return -EINVAL;
        }

        if (rqd->opcode == NVM_OP_PWRITE) {
                struct pblk_line *line;
                struct ppa_addr ppa;
                int i;

                for (i = 0; i < rqd->nr_ppas; i++) {
                        ppa = ppa_list[i];
                        line = &pblk->lines[pblk_dev_ppa_to_line(ppa)];

                        spin_lock(&line->lock);
                        if (line->state != PBLK_LINESTATE_OPEN) {
                                pr_err("pblk: bad ppa: line:%d,state:%d\n",
                                       line->id, line->state);
                                WARN_ON(1);
                                spin_unlock(&line->lock);
                                return -EINVAL;
                        }
                        spin_unlock(&line->lock);
                }
        }
#endif
        return nvm_submit_io(dev, rqd);
}
struct bio *pblk_bio_map_addr(struct pblk *pblk, void *data,
                              unsigned int nr_secs, unsigned int len,
                              gfp_t gfp_mask)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        void *kaddr = data;
        struct page *page;
        struct bio *bio;
        int i, ret;

        if (l_mg->emeta_alloc_type == PBLK_KMALLOC_META)
                return bio_map_kern(dev->q, kaddr, len, gfp_mask);

        bio = bio_kmalloc(gfp_mask, nr_secs);
        if (!bio)
                return ERR_PTR(-ENOMEM);

        for (i = 0; i < nr_secs; i++) {
                page = vmalloc_to_page(kaddr);
                if (!page) {
                        pr_err("pblk: could not map vmalloc bio\n");
                        bio_put(bio);
                        bio = ERR_PTR(-ENOMEM);
                        goto out;
                }

                ret = bio_add_pc_page(dev->q, bio, page, PAGE_SIZE, 0);
                if (ret != PAGE_SIZE) {
                        pr_err("pblk: could not add page to bio\n");
                        bio_put(bio);
                        bio = ERR_PTR(-ENOMEM);
                        goto out;
                }

                kaddr += PAGE_SIZE;
        }
out:
        return bio;
}
int pblk_calc_secs(struct pblk *pblk, unsigned long secs_avail,
                   unsigned long secs_to_flush)
{
        int max = pblk->max_write_pgs;
        int min = pblk->min_write_pgs;
        int secs_to_sync = 0;

        if (secs_avail >= max)
                secs_to_sync = max;
        else if (secs_avail >= min)
                secs_to_sync = min * (secs_avail / min);
        else if (secs_to_flush)
                secs_to_sync = min;

        return secs_to_sync;
}
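
/*
 * Worked example (hypothetical geometry): with min_write_pgs = 8 and
 * max_write_pgs = 64, secs_avail = 23 yields 16 (two full minimal write
 * units), secs_avail = 70 is capped at 64, and secs_avail = 3 yields 0
 * unless secs_to_flush forces a padded minimal write of 8.
 */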
static u64 __pblk_alloc_page(struct pblk *pblk, struct pblk_line *line,
                             int nr_secs)
{
        u64 addr;
        int i;

        /* logic error: ppa out-of-bounds. Prevent generating bad address */
        if (line->cur_sec + nr_secs > pblk->lm.sec_per_line) {
                WARN(1, "pblk: page allocation out of bounds\n");
                nr_secs = pblk->lm.sec_per_line - line->cur_sec;
        }

        line->cur_sec = addr = find_next_zero_bit(line->map_bitmap,
                                        pblk->lm.sec_per_line, line->cur_sec);
        for (i = 0; i < nr_secs; i++, line->cur_sec++)
                WARN_ON(test_and_set_bit(line->cur_sec, line->map_bitmap));

        return addr;
}
u64 pblk_alloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs)
{
        u64 addr;

        /* Lock needed in case a write fails and a recovery needs to remap
         * failed write buffer entries
         */
        spin_lock(&line->lock);
        addr = __pblk_alloc_page(pblk, line, nr_secs);
        line->left_msecs -= nr_secs;
        WARN(line->left_msecs < 0, "pblk: page allocation out of bounds\n");
        spin_unlock(&line->lock);

        return addr;
}
/*
 * Submit emeta to one LUN in the raid line at a time to avoid a deadlock when
 * taking the per LUN semaphore.
 */
static int pblk_line_submit_emeta_io(struct pblk *pblk, struct pblk_line *line,
                                     u64 paddr, int dir)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        struct pblk_line_meta *lm = &pblk->lm;
        struct bio *bio;
        struct nvm_rq rqd;
        struct ppa_addr *ppa_list;
        dma_addr_t dma_ppa_list;
        void *emeta = line->emeta;
        int min = pblk->min_write_pgs;
        int left_ppas = lm->emeta_sec;
        int id = line->id;
        int rq_ppas, rq_len;
        int cmd_op, bio_op;
        int flags;
        int i, j;
        int ret;
        DECLARE_COMPLETION_ONSTACK(wait);

        if (dir == WRITE) {
                bio_op = REQ_OP_WRITE;
                cmd_op = NVM_OP_PWRITE;
                flags = pblk_set_progr_mode(pblk, WRITE);
        } else if (dir == READ) {
                bio_op = REQ_OP_READ;
                cmd_op = NVM_OP_PREAD;
                flags = pblk_set_read_mode(pblk);
        } else
                return -EINVAL;

        ppa_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL, &dma_ppa_list);
        if (!ppa_list)
                return -ENOMEM;

next_rq:
        memset(&rqd, 0, sizeof(struct nvm_rq));

        rq_ppas = pblk_calc_secs(pblk, left_ppas, 0);
        rq_len = rq_ppas * geo->sec_size;

        bio = pblk_bio_map_addr(pblk, emeta, rq_ppas, rq_len, GFP_KERNEL);
        if (IS_ERR(bio)) {
                ret = PTR_ERR(bio);
                goto free_rqd_dma;
        }

        bio->bi_iter.bi_sector = 0; /* internal bio */
        bio_set_op_attrs(bio, bio_op, 0);

        rqd.bio = bio;
        rqd.opcode = cmd_op;
        rqd.flags = flags;
        rqd.nr_ppas = rq_ppas;
        rqd.ppa_list = ppa_list;
        rqd.dma_ppa_list = dma_ppa_list;
        rqd.end_io = pblk_end_io_sync;
        rqd.private = &wait;

        if (dir == WRITE) {
                for (i = 0; i < rqd.nr_ppas; ) {
                        spin_lock(&line->lock);
                        paddr = __pblk_alloc_page(pblk, line, min);
                        spin_unlock(&line->lock);
                        for (j = 0; j < min; j++, i++, paddr++)
                                rqd.ppa_list[i] =
                                        addr_to_gen_ppa(pblk, paddr, id);
                }
        } else {
                for (i = 0; i < rqd.nr_ppas; ) {
                        struct ppa_addr ppa = addr_to_gen_ppa(pblk, paddr, id);
                        int pos = pblk_dev_ppa_to_pos(geo, ppa);

                        while (test_bit(pos, line->blk_bitmap)) {
                                paddr += min;
                                if (pblk_boundary_paddr_checks(pblk, paddr)) {
                                        pr_err("pblk: corrupt emeta line:%d\n",
                                               line->id);
                                        bio_put(bio);
                                        ret = -EINTR;
                                        goto free_rqd_dma;
                                }

                                ppa = addr_to_gen_ppa(pblk, paddr, id);
                                pos = pblk_dev_ppa_to_pos(geo, ppa);
                        }

                        if (pblk_boundary_paddr_checks(pblk, paddr + min)) {
                                pr_err("pblk: corrupt emeta line:%d\n",
                                       line->id);
                                bio_put(bio);
                                ret = -EINTR;
                                goto free_rqd_dma;
                        }

                        for (j = 0; j < min; j++, i++, paddr++)
                                rqd.ppa_list[i] =
                                        addr_to_gen_ppa(pblk, paddr, line->id);
                }
        }

        ret = pblk_submit_io(pblk, &rqd);
        if (ret) {
                pr_err("pblk: emeta I/O submission failed: %d\n", ret);
                bio_put(bio);
                goto free_rqd_dma;
        }

        if (!wait_for_completion_io_timeout(&wait,
                        msecs_to_jiffies(PBLK_COMMAND_TIMEOUT_MS)))
                pr_err("pblk: emeta I/O timed out\n");
        reinit_completion(&wait);

        bio_put(bio);

        if (rqd.error) {
                if (dir == WRITE)
                        pblk_log_write_err(pblk, &rqd);
                else
                        pblk_log_read_err(pblk, &rqd);
        }

        emeta += rq_len;
        left_ppas -= rq_ppas;
        if (left_ppas)
                goto next_rq;

free_rqd_dma:
        nvm_dev_dma_free(dev->parent, ppa_list, dma_ppa_list);
        return ret;
}
u64 pblk_line_smeta_start(struct pblk *pblk, struct pblk_line *line)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        struct pblk_line_meta *lm = &pblk->lm;
        int bit;

        /* This usually only happens on bad lines */
        bit = find_first_zero_bit(line->blk_bitmap, lm->blk_per_line);
        if (bit >= lm->blk_per_line)
                return -1;

        return bit * geo->sec_per_pl;
}
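
/*
 * Example (hypothetical geometry): if blocks 0 and 1 of the line are bad
 * and sec_per_pl = 16, the first zero bit is 2 and smeta starts at
 * sector 32 of the line; on a fully bad line the function returns -1.
 */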
static int pblk_line_submit_smeta_io(struct pblk *pblk, struct pblk_line *line,
                                     u64 paddr, int dir)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct pblk_line_meta *lm = &pblk->lm;
        struct bio *bio;
        struct nvm_rq rqd;
        __le64 *lba_list = NULL;
        int i, ret;
        int cmd_op, bio_op;
        int flags;
        DECLARE_COMPLETION_ONSTACK(wait);

        if (dir == WRITE) {
                bio_op = REQ_OP_WRITE;
                cmd_op = NVM_OP_PWRITE;
                flags = pblk_set_progr_mode(pblk, WRITE);
                lba_list = pblk_line_emeta_to_lbas(line->emeta);
        } else if (dir == READ) {
                bio_op = REQ_OP_READ;
                cmd_op = NVM_OP_PREAD;
                flags = pblk_set_read_mode(pblk);
        } else
                return -EINVAL;

        memset(&rqd, 0, sizeof(struct nvm_rq));

        rqd.ppa_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
                                         &rqd.dma_ppa_list);
        if (!rqd.ppa_list)
                return -ENOMEM;

        bio = bio_map_kern(dev->q, line->smeta, lm->smeta_len, GFP_KERNEL);
        if (IS_ERR(bio)) {
                ret = PTR_ERR(bio);
                goto free_ppa_list;
        }

        bio->bi_iter.bi_sector = 0; /* internal bio */
        bio_set_op_attrs(bio, bio_op, 0);

        rqd.bio = bio;
        rqd.opcode = cmd_op;
        rqd.flags = flags;
        rqd.nr_ppas = lm->smeta_sec;
        rqd.end_io = pblk_end_io_sync;
        rqd.private = &wait;

        for (i = 0; i < lm->smeta_sec; i++, paddr++) {
                rqd.ppa_list[i] = addr_to_gen_ppa(pblk, paddr, line->id);
                if (dir == WRITE)
                        lba_list[paddr] = cpu_to_le64(ADDR_EMPTY);
        }

        /*
         * This I/O is sent by the write thread when a line is replaced. Since
         * the write thread is the only one sending write and erase commands,
         * there is no need to take the LUN semaphore.
         */
        ret = pblk_submit_io(pblk, &rqd);
        if (ret) {
                pr_err("pblk: smeta I/O submission failed: %d\n", ret);
                bio_put(bio);
                goto free_ppa_list;
        }

        if (!wait_for_completion_io_timeout(&wait,
                        msecs_to_jiffies(PBLK_COMMAND_TIMEOUT_MS)))
                pr_err("pblk: smeta I/O timed out\n");

        if (rqd.error) {
                if (dir == WRITE)
                        pblk_log_write_err(pblk, &rqd);
                else
                        pblk_log_read_err(pblk, &rqd);
        }

free_ppa_list:
        nvm_dev_dma_free(dev->parent, rqd.ppa_list, rqd.dma_ppa_list);

        return ret;
}
int pblk_line_read_smeta(struct pblk *pblk, struct pblk_line *line)
{
        u64 bpaddr = pblk_line_smeta_start(pblk, line);

        return pblk_line_submit_smeta_io(pblk, line, bpaddr, READ);
}

int pblk_line_read_emeta(struct pblk *pblk, struct pblk_line *line)
{
        return pblk_line_submit_emeta_io(pblk, line, line->emeta_ssec, READ);
}
static void pblk_setup_e_rq(struct pblk *pblk, struct nvm_rq *rqd,
                            struct ppa_addr ppa)
{
        rqd->opcode = NVM_OP_ERASE;
        rqd->ppa_addr = ppa;
        rqd->nr_ppas = 1;
        rqd->flags = pblk_set_progr_mode(pblk, ERASE);
        rqd->bio = NULL;
}
static int pblk_blk_erase_sync(struct pblk *pblk, struct ppa_addr ppa)
{
        struct nvm_rq rqd;
        int ret;
        DECLARE_COMPLETION_ONSTACK(wait);

        memset(&rqd, 0, sizeof(struct nvm_rq));

        pblk_setup_e_rq(pblk, &rqd, ppa);

        rqd.end_io = pblk_end_io_sync;
        rqd.private = &wait;

        /* The write thread schedules erases so that it minimizes disturbances
         * with writes. Thus, there is no need to take the LUN semaphore.
         */
        ret = pblk_submit_io(pblk, &rqd);
        if (ret) {
                struct nvm_tgt_dev *dev = pblk->dev;
                struct nvm_geo *geo = &dev->geo;

                pr_err("pblk: could not sync erase line:%d,blk:%d\n",
                       pblk_dev_ppa_to_line(ppa),
                       pblk_dev_ppa_to_pos(geo, ppa));
                rqd.error = ret;
                goto out;
        }

        if (!wait_for_completion_io_timeout(&wait,
                        msecs_to_jiffies(PBLK_COMMAND_TIMEOUT_MS)))
                pr_err("pblk: sync erase timed out\n");

out:
        __pblk_end_io_erase(pblk, &rqd);

        return 0;
}
int pblk_line_erase(struct pblk *pblk, struct pblk_line *line)
{
        struct pblk_line_meta *lm = &pblk->lm;
        struct ppa_addr ppa;
        int bit = -1;

        /* Erase one block at a time and only erase good blocks */
        while ((bit = find_next_zero_bit(line->erase_bitmap, lm->blk_per_line,
                                         bit + 1)) < lm->blk_per_line) {
                ppa = pblk->luns[bit].bppa; /* set ch and lun */
                ppa.g.blk = line->id;

                /* If the erase fails, the block is bad and should be marked */
                line->left_eblks--;
                WARN_ON(test_and_set_bit(bit, line->erase_bitmap));

                if (pblk_blk_erase_sync(pblk, ppa)) {
                        pr_err("pblk: failed to erase line %d\n", line->id);
                        return -ENOMEM;
                }
        }

        return 0;
}
/* For now lines are always assumed full lines. Thus, smeta former and current
 * lun bitmaps are omitted.
 */
static int pblk_line_set_metadata(struct pblk *pblk, struct pblk_line *line,
                                  struct pblk_line *cur)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        struct pblk_line_meta *lm = &pblk->lm;
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        struct line_smeta *smeta = line->smeta;
        struct line_emeta *emeta = line->emeta;
        int nr_blk_line;

        /* After erasing the line, new bad blocks might appear and we risk
         * having an invalid line
         */
        nr_blk_line = lm->blk_per_line -
                        bitmap_weight(line->blk_bitmap, lm->blk_per_line);
        if (nr_blk_line < lm->min_blk_line) {
                spin_lock(&l_mg->free_lock);
                spin_lock(&line->lock);
                line->state = PBLK_LINESTATE_BAD;
                spin_unlock(&line->lock);

                list_add_tail(&line->list, &l_mg->bad_list);
                spin_unlock(&l_mg->free_lock);

                pr_debug("pblk: line %d is bad\n", line->id);

                return 0;
        }

        /* Run-time metadata */
        line->lun_bitmap = ((void *)(smeta)) + sizeof(struct line_smeta);

        /* Mark LUNs allocated in this line (all for now) */
        bitmap_set(line->lun_bitmap, 0, lm->lun_bitmap_len);

        smeta->header.identifier = cpu_to_le32(PBLK_MAGIC);
        memcpy(smeta->header.uuid, pblk->instance_uuid, 16);
        smeta->header.id = cpu_to_le32(line->id);
        smeta->header.type = cpu_to_le16(line->type);
        smeta->header.version = cpu_to_le16(1);

        smeta->seq_nr = cpu_to_le64(line->seq_nr);
        smeta->window_wr_lun = cpu_to_le32(geo->nr_luns);

        /* Fill metadata among lines */
        if (cur) {
                memcpy(line->lun_bitmap, cur->lun_bitmap, lm->lun_bitmap_len);
                smeta->prev_id = cpu_to_le32(cur->id);
                cur->emeta->next_id = cpu_to_le32(line->id);
        } else {
                smeta->prev_id = cpu_to_le32(PBLK_LINE_EMPTY);
        }

        /* All smeta must be set at this point */
        smeta->header.crc = cpu_to_le32(pblk_calc_meta_header_crc(pblk, smeta));
        smeta->crc = cpu_to_le32(pblk_calc_smeta_crc(pblk, smeta));

        memcpy(&emeta->header, &smeta->header, sizeof(struct line_header));
        emeta->seq_nr = cpu_to_le64(line->seq_nr);
        emeta->nr_lbas = cpu_to_le64(line->sec_in_line);
        emeta->nr_valid_lbas = cpu_to_le64(0);
        emeta->next_id = cpu_to_le32(PBLK_LINE_EMPTY);
        emeta->crc = cpu_to_le32(0);
        emeta->prev_id = smeta->prev_id;

        return 1;
}
/* For now lines are always assumed full lines. Thus, smeta former and current
 * lun bitmaps are omitted.
 */
static int pblk_line_init_bb(struct pblk *pblk, struct pblk_line *line,
                             int init)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        struct pblk_line_meta *lm = &pblk->lm;
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        int nr_bb = 0;
        u64 off;
        int bit = -1;

        line->sec_in_line = lm->sec_per_line;

        /* Capture bad block information on line mapping bitmaps */
        while ((bit = find_next_bit(line->blk_bitmap, lm->blk_per_line,
                                    bit + 1)) < lm->blk_per_line) {
                off = bit * geo->sec_per_pl;
                bitmap_shift_left(l_mg->bb_aux, l_mg->bb_template, off,
                                  lm->sec_per_line);
                bitmap_or(line->map_bitmap, line->map_bitmap, l_mg->bb_aux,
                          lm->sec_per_line);
                line->sec_in_line -= geo->sec_per_blk;
                if (bit >= lm->emeta_bb)
                        nr_bb++;
        }

        /* Mark smeta metadata sectors as bad sectors */
        bit = find_first_zero_bit(line->blk_bitmap, lm->blk_per_line);
        off = bit * geo->sec_per_pl;
retry_smeta:
        bitmap_set(line->map_bitmap, off, lm->smeta_sec);
        line->sec_in_line -= lm->smeta_sec;
        line->smeta_ssec = off;
        line->cur_sec = off + lm->smeta_sec;

        if (init && pblk_line_submit_smeta_io(pblk, line, off, WRITE)) {
                pr_debug("pblk: line smeta I/O failed. Retry\n");
                off += geo->sec_per_pl;
                goto retry_smeta;
        }

        bitmap_copy(line->invalid_bitmap, line->map_bitmap, lm->sec_per_line);

        /* Mark emeta metadata sectors as bad sectors. We need to consider bad
         * blocks to make sure that there are enough sectors to store emeta
         */
        bit = lm->sec_per_line;
        off = lm->sec_per_line - lm->emeta_sec;
        bitmap_set(line->invalid_bitmap, off, lm->emeta_sec);
        while (nr_bb) {
                off -= geo->sec_per_pl;
                if (!test_bit(off, line->invalid_bitmap)) {
                        bitmap_set(line->invalid_bitmap, off, geo->sec_per_pl);
                        nr_bb--;
                }
        }

        line->sec_in_line -= lm->emeta_sec;
        line->emeta_ssec = off;
        line->vsc = line->left_ssecs = line->left_msecs = line->sec_in_line;

        if (lm->sec_per_line - line->sec_in_line !=
            bitmap_weight(line->invalid_bitmap, lm->sec_per_line)) {
                spin_lock(&line->lock);
                line->state = PBLK_LINESTATE_BAD;
                spin_unlock(&line->lock);

                list_add_tail(&line->list, &l_mg->bad_list);
                pr_err("pblk: unexpected line %d is bad\n", line->id);

                return 0;
        }

        return 1;
}
static int pblk_line_prepare(struct pblk *pblk, struct pblk_line *line)
{
        struct pblk_line_meta *lm = &pblk->lm;

        line->map_bitmap = mempool_alloc(pblk->line_meta_pool, GFP_ATOMIC);
        if (!line->map_bitmap)
                return -ENOMEM;
        memset(line->map_bitmap, 0, lm->sec_bitmap_len);

        /* invalid_bitmap is special since it is used when the line is closed.
         * No need to zero it; it will be initialized using bb info from
         * map_bitmap
         */
        line->invalid_bitmap = mempool_alloc(pblk->line_meta_pool, GFP_ATOMIC);
        if (!line->invalid_bitmap) {
                mempool_free(line->map_bitmap, pblk->line_meta_pool);
                return -ENOMEM;
        }

        spin_lock(&line->lock);
        if (line->state != PBLK_LINESTATE_FREE) {
                spin_unlock(&line->lock);
                WARN(1, "pblk: corrupted line state\n");
                return -EINTR;
        }
        line->state = PBLK_LINESTATE_OPEN;
        spin_unlock(&line->lock);

        /* Bad blocks do not need to be erased */
        bitmap_copy(line->erase_bitmap, line->blk_bitmap, lm->blk_per_line);
        line->left_eblks = line->blk_in_line;
        atomic_set(&line->left_seblks, line->left_eblks);

        kref_init(&line->ref);

        return 0;
}
int pblk_line_recov_alloc(struct pblk *pblk, struct pblk_line *line)
{
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        int ret;

        spin_lock(&l_mg->free_lock);
        l_mg->data_line = line;
        list_del(&line->list);
        spin_unlock(&l_mg->free_lock);

        ret = pblk_line_prepare(pblk, line);
        if (ret) {
                list_add(&line->list, &l_mg->free_list);
                return ret;
        }

        pblk_rl_free_lines_dec(&pblk->rl, line);

        if (!pblk_line_init_bb(pblk, line, 0)) {
                list_add(&line->list, &l_mg->free_list);
                return -EINTR;
        }

        return 0;
}
void pblk_line_recov_close(struct pblk *pblk, struct pblk_line *line)
{
        mempool_free(line->map_bitmap, pblk->line_meta_pool);
        line->map_bitmap = NULL;
        line->smeta = NULL;
        line->emeta = NULL;
}
struct pblk_line *pblk_line_get(struct pblk *pblk)
{
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        struct pblk_line_meta *lm = &pblk->lm;
        struct pblk_line *line = NULL;
        int bit;

        lockdep_assert_held(&l_mg->free_lock);

retry_get:
        if (list_empty(&l_mg->free_list)) {
                pr_err("pblk: no free lines\n");
                return NULL;
        }

        line = list_first_entry(&l_mg->free_list, struct pblk_line, list);
        list_del(&line->list);
        l_mg->nr_free_lines--;

        bit = find_first_zero_bit(line->blk_bitmap, lm->blk_per_line);
        if (unlikely(bit >= lm->blk_per_line)) {
                spin_lock(&line->lock);
                line->state = PBLK_LINESTATE_BAD;
                spin_unlock(&line->lock);

                list_add_tail(&line->list, &l_mg->bad_list);

                pr_debug("pblk: line %d is bad\n", line->id);
                goto retry_get;
        }

        if (pblk_line_prepare(pblk, line)) {
                pr_err("pblk: failed to prepare line %d\n", line->id);
                list_add(&line->list, &l_mg->free_list);
                return NULL;
        }

        return line;
}
static struct pblk_line *pblk_line_retry(struct pblk *pblk,
                                         struct pblk_line *line)
{
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        struct pblk_line *retry_line;

        spin_lock(&l_mg->free_lock);
        retry_line = pblk_line_get(pblk);
        if (!retry_line) {
                spin_unlock(&l_mg->free_lock);
                return NULL;
        }

        retry_line->smeta = line->smeta;
        retry_line->emeta = line->emeta;
        retry_line->meta_line = line->meta_line;
        retry_line->map_bitmap = line->map_bitmap;
        retry_line->invalid_bitmap = line->invalid_bitmap;

        line->map_bitmap = NULL;
        line->invalid_bitmap = NULL;
        line->smeta = NULL;
        line->emeta = NULL;
        spin_unlock(&l_mg->free_lock);

        if (pblk_line_erase(pblk, retry_line))
                return NULL;

        pblk_rl_free_lines_dec(&pblk->rl, retry_line);

        l_mg->data_line = retry_line;

        return retry_line;
}
struct pblk_line *pblk_line_get_first_data(struct pblk *pblk)
{
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        struct pblk_line *line;
        int meta_line;
        int is_next = 0;

        spin_lock(&l_mg->free_lock);
        line = pblk_line_get(pblk);
        if (!line) {
                spin_unlock(&l_mg->free_lock);
                return NULL;
        }

        line->seq_nr = l_mg->d_seq_nr++;
        line->type = PBLK_LINETYPE_DATA;
        l_mg->data_line = line;

        meta_line = find_first_zero_bit(&l_mg->meta_bitmap, PBLK_DATA_LINES);
        set_bit(meta_line, &l_mg->meta_bitmap);
        line->smeta = l_mg->sline_meta[meta_line].meta;
        line->emeta = l_mg->eline_meta[meta_line].meta;
        line->meta_line = meta_line;

        /* Allocate next line for preparation */
        l_mg->data_next = pblk_line_get(pblk);
        if (l_mg->data_next) {
                l_mg->data_next->seq_nr = l_mg->d_seq_nr++;
                l_mg->data_next->type = PBLK_LINETYPE_DATA;
                is_next = 1;
        }
        spin_unlock(&l_mg->free_lock);

        pblk_rl_free_lines_dec(&pblk->rl, line);
        if (is_next)
                pblk_rl_free_lines_dec(&pblk->rl, l_mg->data_next);

        if (pblk_line_erase(pblk, line))
                return NULL;

retry_setup:
        if (!pblk_line_set_metadata(pblk, line, NULL)) {
                line = pblk_line_retry(pblk, line);
                if (!line)
                        return NULL;

                goto retry_setup;
        }

        if (!pblk_line_init_bb(pblk, line, 1)) {
                line = pblk_line_retry(pblk, line);
                if (!line)
                        return NULL;

                goto retry_setup;
        }

        return line;
}
struct pblk_line *pblk_line_replace_data(struct pblk *pblk)
{
        struct pblk_line_meta *lm = &pblk->lm;
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        struct pblk_line *cur, *new;
        unsigned int left_seblks;
        int is_next = 0;
        int meta_line;

        cur = l_mg->data_line;
        new = l_mg->data_next;
        if (!new)
                return NULL;
        l_mg->data_line = new;

retry_erase:
        left_seblks = atomic_read(&new->left_seblks);
        if (left_seblks) {
                /* If line is not fully erased, erase it */
                if (new->left_eblks) {
                        if (pblk_line_erase(pblk, new))
                                return NULL;
                } else {
                        io_schedule();
                }
                goto retry_erase;
        }

        spin_lock(&l_mg->free_lock);
        /* Allocate next line for preparation */
        l_mg->data_next = pblk_line_get(pblk);
        if (l_mg->data_next) {
                l_mg->data_next->seq_nr = l_mg->d_seq_nr++;
                l_mg->data_next->type = PBLK_LINETYPE_DATA;
                is_next = 1;
        }

retry_meta:
        meta_line = find_first_zero_bit(&l_mg->meta_bitmap, PBLK_DATA_LINES);
        if (meta_line == PBLK_DATA_LINES) {
                spin_unlock(&l_mg->free_lock);
                io_schedule();
                spin_lock(&l_mg->free_lock);
                goto retry_meta;
        }

        set_bit(meta_line, &l_mg->meta_bitmap);
        new->smeta = l_mg->sline_meta[meta_line].meta;
        new->emeta = l_mg->eline_meta[meta_line].meta;
        new->meta_line = meta_line;

        memset(new->smeta, 0, lm->smeta_len);
        memset(new->emeta, 0, lm->emeta_len);
        spin_unlock(&l_mg->free_lock);

        if (is_next)
                pblk_rl_free_lines_dec(&pblk->rl, l_mg->data_next);

retry_setup:
        if (!pblk_line_set_metadata(pblk, new, cur)) {
                new = pblk_line_retry(pblk, new);
                if (!new)
                        return NULL;

                goto retry_setup;
        }

        if (!pblk_line_init_bb(pblk, new, 1)) {
                new = pblk_line_retry(pblk, new);
                if (!new)
                        return NULL;

                goto retry_setup;
        }

        return new;
}
void pblk_line_free(struct pblk *pblk, struct pblk_line *line)
{
        if (line->map_bitmap)
                mempool_free(line->map_bitmap, pblk->line_meta_pool);
        if (line->invalid_bitmap)
                mempool_free(line->invalid_bitmap, pblk->line_meta_pool);

        line->map_bitmap = NULL;
        line->invalid_bitmap = NULL;
}
void pblk_line_put(struct kref *ref)
{
        struct pblk_line *line = container_of(ref, struct pblk_line, ref);
        struct pblk *pblk = line->pblk;
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;

        spin_lock(&line->lock);
        WARN_ON(line->state != PBLK_LINESTATE_GC);
        line->state = PBLK_LINESTATE_FREE;
        line->gc_group = PBLK_LINEGC_NONE;
        pblk_line_free(pblk, line);
        spin_unlock(&line->lock);

        spin_lock(&l_mg->free_lock);
        list_add_tail(&line->list, &l_mg->free_list);
        l_mg->nr_free_lines++;
        spin_unlock(&l_mg->free_lock);

        pblk_rl_free_lines_inc(&pblk->rl, line);
}
int pblk_blk_erase_async(struct pblk *pblk, struct ppa_addr ppa)
{
        struct nvm_rq *rqd;
        int err;

        rqd = mempool_alloc(pblk->r_rq_pool, GFP_KERNEL);
        memset(rqd, 0, pblk_r_rq_size);

        pblk_setup_e_rq(pblk, rqd, ppa);

        rqd->end_io = pblk_end_io_erase;
        rqd->private = pblk;

        /* The write thread schedules erases so that it minimizes disturbances
         * with writes. Thus, there is no need to take the LUN semaphore.
         */
        err = pblk_submit_io(pblk, rqd);
        if (err) {
                struct nvm_tgt_dev *dev = pblk->dev;
                struct nvm_geo *geo = &dev->geo;

                pr_err("pblk: could not async erase line:%d,blk:%d\n",
                       pblk_dev_ppa_to_line(ppa),
                       pblk_dev_ppa_to_pos(geo, ppa));
        }

        return err;
}
struct pblk_line *pblk_line_get_data(struct pblk *pblk)
{
        return pblk->l_mg.data_line;
}

struct pblk_line *pblk_line_get_data_next(struct pblk *pblk)
{
        return pblk->l_mg.data_next;
}

int pblk_line_is_full(struct pblk_line *line)
{
        return (line->left_msecs == 0);
}
void pblk_line_close(struct pblk *pblk, struct pblk_line *line)
{
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        struct list_head *move_list;

        line->emeta->crc = cpu_to_le32(pblk_calc_emeta_crc(pblk, line->emeta));

        if (pblk_line_submit_emeta_io(pblk, line, line->cur_sec, WRITE))
                pr_err("pblk: line %d close I/O failed\n", line->id);

        WARN(!bitmap_full(line->map_bitmap, line->sec_in_line),
             "pblk: corrupt closed line %d\n", line->id);

        spin_lock(&l_mg->free_lock);
        WARN_ON(!test_and_clear_bit(line->meta_line, &l_mg->meta_bitmap));
        spin_unlock(&l_mg->free_lock);

        spin_lock(&l_mg->gc_lock);
        spin_lock(&line->lock);
        WARN_ON(line->state != PBLK_LINESTATE_OPEN);
        line->state = PBLK_LINESTATE_CLOSED;
        move_list = pblk_line_gc_list(pblk, line);

        list_add_tail(&line->list, move_list);

        mempool_free(line->map_bitmap, pblk->line_meta_pool);
        line->map_bitmap = NULL;
        line->smeta = NULL;
        line->emeta = NULL;

        spin_unlock(&line->lock);
        spin_unlock(&l_mg->gc_lock);
}
void pblk_line_close_ws(struct work_struct *work)
{
        struct pblk_line_ws *line_ws = container_of(work, struct pblk_line_ws,
                                                    ws);
        struct pblk *pblk = line_ws->pblk;
        struct pblk_line *line = line_ws->line;

        pblk_line_close(pblk, line);
        mempool_free(line_ws, pblk->line_ws_pool);
}
void pblk_line_mark_bb(struct work_struct *work)
{
        struct pblk_line_ws *line_ws = container_of(work, struct pblk_line_ws,
                                                    ws);
        struct pblk *pblk = line_ws->pblk;
        struct nvm_tgt_dev *dev = pblk->dev;
        struct ppa_addr *ppa = line_ws->priv;
        int ret;

        ret = nvm_set_tgt_bb_tbl(dev, ppa, 1, NVM_BLK_T_GRWN_BAD);
        if (ret) {
                struct pblk_line *line;
                int pos;

                line = &pblk->lines[pblk_dev_ppa_to_line(*ppa)];
                pos = pblk_dev_ppa_to_pos(&dev->geo, *ppa);

                pr_err("pblk: failed to mark bb, line:%d, pos:%d\n",
                       line->id, pos);
        }

        kfree(ppa);
        mempool_free(line_ws, pblk->line_ws_pool);
}
void pblk_line_run_ws(struct pblk *pblk, struct pblk_line *line, void *priv,
                      void (*work)(struct work_struct *))
{
        struct pblk_line_ws *line_ws;

        line_ws = mempool_alloc(pblk->line_ws_pool, GFP_ATOMIC);
        if (!line_ws)
                return;

        line_ws->pblk = pblk;
        line_ws->line = line;
        line_ws->priv = priv;

        INIT_WORK(&line_ws->ws, work);
        queue_work(pblk->kw_wq, &line_ws->ws);
}
void pblk_down_rq(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas,
                  unsigned long *lun_bitmap)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        struct pblk_lun *rlun;
        int lun_id = ppa_list[0].g.ch * geo->luns_per_chnl + ppa_list[0].g.lun;
        int ret;

        /*
         * Only send one inflight I/O per LUN. Since we map at a page
         * granularity, all ppas in the I/O will map to the same LUN
         */
#ifdef CONFIG_NVM_DEBUG
        int i;

        for (i = 1; i < nr_ppas; i++)
                WARN_ON(ppa_list[0].g.lun != ppa_list[i].g.lun ||
                        ppa_list[0].g.ch != ppa_list[i].g.ch);
#endif
        /* If the LUN has been locked for this same request, do not attempt to
         * take the semaphore again.
         */
        if (test_and_set_bit(lun_id, lun_bitmap))
                return;

        rlun = &pblk->luns[lun_id];
        ret = down_timeout(&rlun->wr_sem, msecs_to_jiffies(5000));
        if (ret) {
                switch (ret) {
                case -ETIME:
                        pr_err("pblk: lun semaphore timed out\n");
                        break;
                case -EINTR:
                        pr_err("pblk: lun semaphore timed out\n");
                        break;
                }
        }
}
void pblk_up_rq(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas,
                unsigned long *lun_bitmap)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        struct pblk_lun *rlun;
        int nr_luns = geo->nr_luns;
        int bit = -1;

        while ((bit = find_next_bit(lun_bitmap, nr_luns, bit + 1)) < nr_luns) {
                rlun = &pblk->luns[bit];
                up(&rlun->wr_sem);
        }

        kfree(lun_bitmap);
}
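
/*
 * Usage sketch (illustrative only): pblk_down_rq()/pblk_up_rq() bracket
 * a write submission; the bitmap records which LUN semaphores were taken
 * so that only those are released on completion:
 *
 *	lun_bitmap = kzalloc(lm->lun_bitmap_len, GFP_KERNEL);
 *	pblk_down_rq(pblk, rqd->ppa_list, rqd->nr_ppas, lun_bitmap);
 *	... submit and wait for completion ...
 *	pblk_up_rq(pblk, rqd->ppa_list, rqd->nr_ppas, lun_bitmap);
 *
 * Note that pblk_up_rq() frees the bitmap, so the caller must not reuse
 * it afterwards.
 */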
void pblk_update_map(struct pblk *pblk, sector_t lba, struct ppa_addr ppa)
{
        struct ppa_addr l2p_ppa;

        /* logic error: lba out-of-bounds. Ignore update */
        if (!(lba < pblk->rl.nr_secs)) {
                WARN(1, "pblk: corrupted L2P map request\n");
                return;
        }

        spin_lock(&pblk->trans_lock);
        l2p_ppa = pblk_trans_map_get(pblk, lba);

        if (!pblk_addr_in_cache(l2p_ppa) && !pblk_ppa_empty(l2p_ppa))
                pblk_map_invalidate(pblk, l2p_ppa);

        pblk_trans_map_set(pblk, lba, ppa);
        spin_unlock(&pblk->trans_lock);
}
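
/*
 * Usage sketch (illustrative only): the write path updates the map once
 * data sits in the ring buffer, e.g.:
 *
 *	struct ppa_addr cache_ppa = pblk_cacheline_to_addr(pos);
 *
 *	pblk_update_map(pblk, lba, cache_ppa);
 *
 * pblk_cacheline_to_addr() is assumed here, not defined in this file;
 * the point is that the old mapping, if it was a device address, is
 * invalidated before the new one is set.
 */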
void pblk_update_map_cache(struct pblk *pblk, sector_t lba, struct ppa_addr ppa)
{
#ifdef CONFIG_NVM_DEBUG
        /* Callers must ensure that the ppa points to a cache address */
        BUG_ON(!pblk_addr_in_cache(ppa));
        BUG_ON(pblk_rb_pos_oob(&pblk->rwb, pblk_addr_to_cacheline(ppa)));
#endif

        pblk_update_map(pblk, lba, ppa);
}
int pblk_update_map_gc(struct pblk *pblk, sector_t lba, struct ppa_addr ppa,
                       struct pblk_line *gc_line)
{
        struct ppa_addr l2p_ppa;
        int ret = 1;

#ifdef CONFIG_NVM_DEBUG
        /* Callers must ensure that the ppa points to a cache address */
        BUG_ON(!pblk_addr_in_cache(ppa));
        BUG_ON(pblk_rb_pos_oob(&pblk->rwb, pblk_addr_to_cacheline(ppa)));
#endif

        /* logic error: lba out-of-bounds. Ignore update */
        if (!(lba < pblk->rl.nr_secs)) {
                WARN(1, "pblk: corrupted L2P map request\n");
                return 0;
        }

        spin_lock(&pblk->trans_lock);
        l2p_ppa = pblk_trans_map_get(pblk, lba);

        /* Prevent updated entries from being overwritten by GC */
        if (pblk_addr_in_cache(l2p_ppa) || pblk_ppa_empty(l2p_ppa) ||
            pblk_tgt_ppa_to_line(l2p_ppa) != gc_line->id) {
                ret = 0;
                goto out;
        }

        pblk_trans_map_set(pblk, lba, ppa);
out:
        spin_unlock(&pblk->trans_lock);
        return ret;
}
void pblk_update_map_dev(struct pblk *pblk, sector_t lba, struct ppa_addr ppa,
                         struct ppa_addr entry_line)
{
        struct ppa_addr l2p_line;

#ifdef CONFIG_NVM_DEBUG
        /* Callers must ensure that the ppa points to a device address */
        BUG_ON(pblk_addr_in_cache(ppa));
#endif
        /* Invalidate and discard padded entries */
        if (lba == ADDR_EMPTY) {
#ifdef CONFIG_NVM_DEBUG
                atomic_long_inc(&pblk->padded_wb);
#endif
                pblk_map_invalidate(pblk, ppa);
                return;
        }

        /* logic error: lba out-of-bounds. Ignore update */
        if (!(lba < pblk->rl.nr_secs)) {
                WARN(1, "pblk: corrupted L2P map request\n");
                return;
        }

        spin_lock(&pblk->trans_lock);
        l2p_line = pblk_trans_map_get(pblk, lba);

        /* Do not update L2P if the cacheline has been updated. In this case,
         * the mapped ppa must be invalidated
         */
        if (l2p_line.ppa != entry_line.ppa) {
                if (!pblk_ppa_empty(ppa))
                        pblk_map_invalidate(pblk, ppa);
                goto out;
        }

#ifdef CONFIG_NVM_DEBUG
        WARN_ON(!pblk_addr_in_cache(l2p_line) && !pblk_ppa_empty(l2p_line));
#endif

        pblk_trans_map_set(pblk, lba, ppa);
out:
        spin_unlock(&pblk->trans_lock);
}
void pblk_lookup_l2p_seq(struct pblk *pblk, struct ppa_addr *ppas,
                         sector_t blba, int nr_secs)
{
        int i;

        spin_lock(&pblk->trans_lock);
        for (i = 0; i < nr_secs; i++)
                ppas[i] = pblk_trans_map_get(pblk, blba + i);
        spin_unlock(&pblk->trans_lock);
}
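
/*
 * Usage sketch (illustrative only): a sequential read of nr_secs sectors
 * resolves all mappings under a single lock acquisition:
 *
 *	struct ppa_addr ppas[PBLK_MAX_REQ_ADDRS];
 *
 *	pblk_lookup_l2p_seq(pblk, ppas, blba, nr_secs);
 *
 * PBLK_MAX_REQ_ADDRS is assumed from the target's headers. Each entry is
 * then either a cache address, a device address, or empty, and the read
 * path handles the three cases separately.
 */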
void pblk_lookup_l2p_rand(struct pblk *pblk, struct ppa_addr *ppas,
                          u64 *lba_list, int nr_secs)
{
        u64 lba;
        int i;

        spin_lock(&pblk->trans_lock);
        for (i = 0; i < nr_secs; i++) {
                lba = lba_list[i];
                if (lba == ADDR_EMPTY) {
                        ppas[i].ppa = ADDR_EMPTY;
                } else {
                        /* logic error: lba out-of-bounds. Ignore update */
                        if (!(lba < pblk->rl.nr_secs)) {
                                WARN(1, "pblk: corrupted L2P map request\n");
                                continue;
                        }
                        ppas[i] = pblk_trans_map_get(pblk, lba);
                }
        }
        spin_unlock(&pblk->trans_lock);
}