2 * Copyright (C) 2016 CNEX Labs
3 * Initial release: Javier Gonzalez <javier@cnexlabs.com>
4 * Matias Bjorling <matias@cnexlabs.com>
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License version
8 * 2 as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License for more details.
15 * pblk-core.c - pblk's core functionality
20 #include <linux/time.h>
22 static void pblk_mark_bb(struct pblk *pblk, struct pblk_line *line,
25 struct nvm_tgt_dev *dev = pblk->dev;
26 struct nvm_geo *geo = &dev->geo;
27 int pos = pblk_dev_ppa_to_pos(geo, *ppa);
29 pr_debug("pblk: erase failed: line:%d, pos:%d\n", line->id, pos);
30 atomic_long_inc(&pblk->erase_failed);
32 atomic_dec(&line->blk_in_line);
33 if (test_and_set_bit(pos, line->blk_bitmap))
34 pr_err("pblk: attempted to erase bb: line:%d, pos:%d\n",
37 pblk_line_run_ws(pblk, NULL, ppa, pblk_line_mark_bb);
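/* Common erase completion: account for the erased block on its line and, on
 * an erase failure, defer marking the block bad to a work item (pblk_mark_bb).
 */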
40 static void __pblk_end_io_erase(struct pblk *pblk, struct nvm_rq *rqd)
42 struct pblk_line *line;
44 line = &pblk->lines[pblk_dev_ppa_to_line(rqd->ppa_addr)];
45 atomic_dec(&line->left_seblks);
50 ppa = kmalloc(sizeof(struct ppa_addr), GFP_ATOMIC);
55 pblk_mark_bb(pblk, line, ppa);
59 /* Erase completion assumes that only one block is erased at a time */
60 static void pblk_end_io_erase(struct nvm_rq *rqd)
62 struct pblk *pblk = rqd->private;
64 __pblk_end_io_erase(pblk, rqd);
65 mempool_free(rqd, pblk->g_rq_pool);
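/* Invalidate a physical sector on a line: flag it in the invalid bitmap and
 * decrement the valid sector count. A closed line may have to move to a
 * different GC list as a result.
 */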
68 void __pblk_map_invalidate(struct pblk *pblk, struct pblk_line *line,
71 struct pblk_line_mgmt *l_mg = &pblk->l_mg;
72 struct list_head *move_list = NULL;
74 /* Lines being reclaimed (GC'ed) cannot be invalidated. Before the L2P
75 * table is modified with reclaimed sectors, a check is done to ensure
76 * that newer updates are not overwritten.
78 spin_lock(&line->lock);
79 if (line->state == PBLK_LINESTATE_GC ||
80 line->state == PBLK_LINESTATE_FREE) {
81 spin_unlock(&line->lock);
85 if (test_and_set_bit(paddr, line->invalid_bitmap)) {
86 WARN_ONCE(1, "pblk: double invalidate\n");
87 spin_unlock(&line->lock);
90 le32_add_cpu(line->vsc, -1);
92 if (line->state == PBLK_LINESTATE_CLOSED)
93 move_list = pblk_line_gc_list(pblk, line);
94 spin_unlock(&line->lock);
97 spin_lock(&l_mg->gc_lock);
98 spin_lock(&line->lock);
99 /* Prevent moving a line that has just been chosen for GC */
100 if (line->state == PBLK_LINESTATE_GC ||
101 line->state == PBLK_LINESTATE_FREE) {
102 spin_unlock(&line->lock);
103 spin_unlock(&l_mg->gc_lock);
106 spin_unlock(&line->lock);
108 list_move_tail(&line->list, move_list);
109 spin_unlock(&l_mg->gc_lock);
113 void pblk_map_invalidate(struct pblk *pblk, struct ppa_addr ppa)
115 struct pblk_line *line;
119 #ifdef CONFIG_NVM_DEBUG
120 /* Callers must ensure that the ppa points to a device address */
121 BUG_ON(pblk_addr_in_cache(ppa));
122 BUG_ON(pblk_ppa_empty(ppa));
125 line_id = pblk_tgt_ppa_to_line(ppa);
126 line = &pblk->lines[line_id];
127 paddr = pblk_dev_ppa_to_line_addr(pblk, ppa);
129 __pblk_map_invalidate(pblk, line, paddr);
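/* Invalidate the L2P entries for a range of LBAs (discard path) and reset
 * each entry to the empty address.
 */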
132 static void pblk_invalidate_range(struct pblk *pblk, sector_t slba,
133 unsigned int nr_secs)
137 spin_lock(&pblk->trans_lock);
138 for (lba = slba; lba < slba + nr_secs; lba++) {
141 ppa = pblk_trans_map_get(pblk, lba);
143 if (!pblk_addr_in_cache(ppa) && !pblk_ppa_empty(ppa))
144 pblk_map_invalidate(pblk, ppa);
146 pblk_ppa_set_empty(&ppa);
147 pblk_trans_map_set(pblk, lba, ppa);
149 spin_unlock(&pblk->trans_lock);
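/* Allocate a zeroed nvm_rq from the write or general request pool, depending
 * on the I/O direction.
 */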
152 struct nvm_rq *pblk_alloc_rqd(struct pblk *pblk, int rw)
159 pool = pblk->w_rq_pool;
160 rq_size = pblk_w_rq_size;
162 pool = pblk->g_rq_pool;
163 rq_size = pblk_g_rq_size;
166 rqd = mempool_alloc(pool, GFP_KERNEL);
167 memset(rqd, 0, rq_size);
172 void pblk_free_rqd(struct pblk *pblk, struct nvm_rq *rqd, int rw)
177 pool = pblk->w_rq_pool;
179 pool = pblk->g_rq_pool;
181 mempool_free(rqd, pool);
184 void pblk_bio_free_pages(struct pblk *pblk, struct bio *bio, int off,
190 WARN_ON(off + nr_pages != bio->bi_vcnt);
192 bio_advance(bio, off * PBLK_EXPOSED_PAGE_SIZE);
193 for (i = off; i < nr_pages + off; i++) {
194 bv = bio->bi_io_vec[i];
195 mempool_free(bv.bv_page, pblk->page_pool);
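/* Fill a bio with pages taken from pblk's page pool; on failure, the pages
 * added so far are returned to the pool.
 */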
199 int pblk_bio_add_pages(struct pblk *pblk, struct bio *bio, gfp_t flags,
202 struct request_queue *q = pblk->dev->q;
206 for (i = 0; i < nr_pages; i++) {
207 page = mempool_alloc(pblk->page_pool, flags);
211 ret = bio_add_pc_page(q, bio, page, PBLK_EXPOSED_PAGE_SIZE, 0);
212 if (ret != PBLK_EXPOSED_PAGE_SIZE) {
213 pr_err("pblk: could not add page to bio\n");
214 mempool_free(page, pblk->page_pool);
221 pblk_bio_free_pages(pblk, bio, 0, i - 1);
225 static void pblk_write_kick(struct pblk *pblk)
227 wake_up_process(pblk->writer_ts);
228 mod_timer(&pblk->wtimer, jiffies + msecs_to_jiffies(1000));
231 void pblk_write_timer_fn(unsigned long data)
233 struct pblk *pblk = (struct pblk *)data;
235 /* kick the write thread every tick to flush outstanding data */
236 pblk_write_kick(pblk);
239 void pblk_write_should_kick(struct pblk *pblk)
241 unsigned int secs_avail = pblk_rb_read_count(&pblk->rwb);
243 if (secs_avail >= pblk->min_write_pgs)
244 pblk_write_kick(pblk);
247 void pblk_end_bio_sync(struct bio *bio)
249 struct completion *waiting = bio->bi_private;
254 void pblk_end_io_sync(struct nvm_rq *rqd)
256 struct completion *waiting = rqd->private;
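/* Force outstanding data out of the write buffer: send an empty flush bio
 * through the write cache and wait for its completion.
 */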
261 void pblk_flush_writer(struct pblk *pblk)
265 DECLARE_COMPLETION_ONSTACK(wait);
267 bio = bio_alloc(GFP_KERNEL, 1);
271 bio->bi_iter.bi_sector = 0; /* internal bio */
272 bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_OP_FLUSH);
273 bio->bi_private = &wait;
274 bio->bi_end_io = pblk_end_bio_sync;
276 ret = pblk_write_to_cache(pblk, bio, 0);
277 if (ret == NVM_IO_OK) {
278 if (!wait_for_completion_io_timeout(&wait,
279 msecs_to_jiffies(PBLK_COMMAND_TIMEOUT_MS))) {
280 pr_err("pblk: flush cache timed out\n");
282 } else if (ret != NVM_IO_DONE) {
283 pr_err("pblk: tear down bio failed\n");
287 pr_err("pblk: flush sync write failed (%u)\n", bio->bi_status);
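/* Pick the GC list a line belongs to based on its valid sector count (vsc).
 * A vsc outside the expected range marks the line as corrupt.
 */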
292 struct list_head *pblk_line_gc_list(struct pblk *pblk, struct pblk_line *line)
294 struct pblk_line_meta *lm = &pblk->lm;
295 struct pblk_line_mgmt *l_mg = &pblk->l_mg;
296 struct list_head *move_list = NULL;
297 int vsc = le32_to_cpu(*line->vsc);
300 if (line->gc_group != PBLK_LINEGC_FULL) {
301 line->gc_group = PBLK_LINEGC_FULL;
302 move_list = &l_mg->gc_full_list;
304 } else if (vsc < lm->mid_thrs) {
305 if (line->gc_group != PBLK_LINEGC_HIGH) {
306 line->gc_group = PBLK_LINEGC_HIGH;
307 move_list = &l_mg->gc_high_list;
309 } else if (vsc < lm->high_thrs) {
310 if (line->gc_group != PBLK_LINEGC_MID) {
311 line->gc_group = PBLK_LINEGC_MID;
312 move_list = &l_mg->gc_mid_list;
314 } else if (vsc < line->sec_in_line) {
315 if (line->gc_group != PBLK_LINEGC_LOW) {
316 line->gc_group = PBLK_LINEGC_LOW;
317 move_list = &l_mg->gc_low_list;
319 } else if (vsc == line->sec_in_line) {
320 if (line->gc_group != PBLK_LINEGC_EMPTY) {
321 line->gc_group = PBLK_LINEGC_EMPTY;
322 move_list = &l_mg->gc_empty_list;
325 line->state = PBLK_LINESTATE_CORRUPT;
326 line->gc_group = PBLK_LINEGC_NONE;
327 move_list = &l_mg->corrupt_list;
328 pr_err("pblk: corrupted vsc for line %d, vsc:%d (%d/%d/%d)\n",
331 lm->high_thrs, lm->mid_thrs);
337 void pblk_discard(struct pblk *pblk, struct bio *bio)
339 sector_t slba = pblk_get_lba(bio);
340 sector_t nr_secs = pblk_get_secs(bio);
342 pblk_invalidate_range(pblk, slba, nr_secs);
345 struct ppa_addr pblk_get_lba_map(struct pblk *pblk, sector_t lba)
349 spin_lock(&pblk->trans_lock);
350 ppa = pblk_trans_map_get(pblk, lba);
351 spin_unlock(&pblk->trans_lock);
356 void pblk_log_write_err(struct pblk *pblk, struct nvm_rq *rqd)
358 atomic_long_inc(&pblk->write_failed);
359 #ifdef CONFIG_NVM_DEBUG
360 pblk_print_failed_rqd(pblk, rqd, rqd->error);
364 void pblk_log_read_err(struct pblk *pblk, struct nvm_rq *rqd)
366 /* Empty page read is not necessarily an error (e.g., L2P recovery) */
367 if (rqd->error == NVM_RSP_ERR_EMPTYPAGE) {
368 atomic_long_inc(&pblk->read_empty);
372 switch (rqd->error) {
373 case NVM_RSP_WARN_HIGHECC:
374 atomic_long_inc(&pblk->read_high_ecc);
376 case NVM_RSP_ERR_FAILECC:
377 case NVM_RSP_ERR_FAILCRC:
378 atomic_long_inc(&pblk->read_failed);
381 pr_err("pblk: unknown read error:%d\n", rqd->error);
383 #ifdef CONFIG_NVM_DEBUG
384 pblk_print_failed_rqd(pblk, rqd, rqd->error);
388 void pblk_set_sec_per_write(struct pblk *pblk, int sec_per_write)
390 pblk->sec_per_write = sec_per_write;
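/* Submit an I/O to the device. Under CONFIG_NVM_DEBUG the ppa boundaries are
 * checked and, for writes, every target line must be open.
 */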
393 int pblk_submit_io(struct pblk *pblk, struct nvm_rq *rqd)
395 struct nvm_tgt_dev *dev = pblk->dev;
397 #ifdef CONFIG_NVM_DEBUG
398 struct ppa_addr *ppa_list;
400 ppa_list = (rqd->nr_ppas > 1) ? rqd->ppa_list : &rqd->ppa_addr;
401 if (pblk_boundary_ppa_checks(dev, ppa_list, rqd->nr_ppas)) {
406 if (rqd->opcode == NVM_OP_PWRITE) {
407 struct pblk_line *line;
411 for (i = 0; i < rqd->nr_ppas; i++) {
413 line = &pblk->lines[pblk_dev_ppa_to_line(ppa)];
415 spin_lock(&line->lock);
416 if (line->state != PBLK_LINESTATE_OPEN) {
417 pr_err("pblk: bad ppa: line:%d,state:%d\n",
418 line->id, line->state);
420 spin_unlock(&line->lock);
423 spin_unlock(&line->lock);
427 return nvm_submit_io(dev, rqd);
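/* Map a metadata buffer into a bio. kmalloc'ed buffers can be mapped
 * directly; vmalloc'ed buffers are added to the bio page by page.
 */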
430 struct bio *pblk_bio_map_addr(struct pblk *pblk, void *data,
431 unsigned int nr_secs, unsigned int len,
434 struct nvm_tgt_dev *dev = pblk->dev;
435 struct pblk_line_mgmt *l_mg = &pblk->l_mg;
441 if (l_mg->emeta_alloc_type == PBLK_KMALLOC_META)
442 return bio_map_kern(dev->q, kaddr, len, gfp_mask);
444 bio = bio_kmalloc(gfp_mask, nr_secs);
446 return ERR_PTR(-ENOMEM);
448 for (i = 0; i < nr_secs; i++) {
449 page = vmalloc_to_page(kaddr);
451 pr_err("pblk: could not map vmalloc bio\n");
453 bio = ERR_PTR(-ENOMEM);
457 ret = bio_add_pc_page(dev->q, bio, page, PAGE_SIZE, 0);
458 if (ret != PAGE_SIZE) {
459 pr_err("pblk: could not add page to bio\n");
461 bio = ERR_PTR(-ENOMEM);
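/* Decide how many sectors to submit: the configured write size when enough
 * data is buffered, otherwise the largest multiple of the minimum write size;
 * with less than that, only a pending flush justifies an I/O.
 */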
471 int pblk_calc_secs(struct pblk *pblk, unsigned long secs_avail,
472 unsigned long secs_to_flush)
474 int max = pblk->sec_per_write;
475 int min = pblk->min_write_pgs;
476 int secs_to_sync = 0;
478 if (secs_avail >= max)
480 else if (secs_avail >= min)
481 secs_to_sync = min * (secs_avail / min);
482 else if (secs_to_flush)
488 void pblk_dealloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs)
493 addr = find_next_zero_bit(line->map_bitmap,
494 pblk->lm.sec_per_line, line->cur_sec);
495 line->cur_sec = addr - nr_secs;
497 for (i = 0; i < nr_secs; i++, line->cur_sec--)
498 WARN_ON(!test_and_clear_bit(line->cur_sec, line->map_bitmap));
501 u64 __pblk_alloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs)
506 /* logic error: ppa out-of-bounds. Prevent generating a bad address */
507 if (line->cur_sec + nr_secs > pblk->lm.sec_per_line) {
508 WARN(1, "pblk: page allocation out of bounds\n");
509 nr_secs = pblk->lm.sec_per_line - line->cur_sec;
512 line->cur_sec = addr = find_next_zero_bit(line->map_bitmap,
513 pblk->lm.sec_per_line, line->cur_sec);
514 for (i = 0; i < nr_secs; i++, line->cur_sec++)
515 WARN_ON(test_and_set_bit(line->cur_sec, line->map_bitmap));
520 u64 pblk_alloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs)
524 /* Lock needed in case a write fails and a recovery needs to remap
525 * failed write buffer entries
527 spin_lock(&line->lock);
528 addr = __pblk_alloc_page(pblk, line, nr_secs);
529 line->left_msecs -= nr_secs;
530 WARN(line->left_msecs < 0, "pblk: page allocation out of bounds\n");
531 spin_unlock(&line->lock);
536 u64 pblk_lookup_page(struct pblk *pblk, struct pblk_line *line)
540 spin_lock(&line->lock);
541 paddr = find_next_zero_bit(line->map_bitmap,
542 pblk->lm.sec_per_line, line->cur_sec);
543 spin_unlock(&line->lock);
549 * Submit emeta to one LUN in the raid line at a time to avoid a deadlock when
550 * taking the per LUN semaphore.
552 static int pblk_line_submit_emeta_io(struct pblk *pblk, struct pblk_line *line,
553 void *emeta_buf, u64 paddr, int dir)
555 struct nvm_tgt_dev *dev = pblk->dev;
556 struct nvm_geo *geo = &dev->geo;
557 struct pblk_line_meta *lm = &pblk->lm;
560 struct ppa_addr *ppa_list;
561 dma_addr_t dma_ppa_list;
562 int min = pblk->min_write_pgs;
563 int left_ppas = lm->emeta_sec[0];
569 DECLARE_COMPLETION_ONSTACK(wait);
572 bio_op = REQ_OP_WRITE;
573 cmd_op = NVM_OP_PWRITE;
574 } else if (dir == READ) {
575 bio_op = REQ_OP_READ;
576 cmd_op = NVM_OP_PREAD;
580 ppa_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL, &dma_ppa_list);
585 memset(&rqd, 0, sizeof(struct nvm_rq));
587 rq_ppas = pblk_calc_secs(pblk, left_ppas, 0);
588 rq_len = rq_ppas * geo->sec_size;
590 bio = pblk_bio_map_addr(pblk, emeta_buf, rq_ppas, rq_len, GFP_KERNEL);
596 bio->bi_iter.bi_sector = 0; /* internal bio */
597 bio_set_op_attrs(bio, bio_op, 0);
601 rqd.nr_ppas = rq_ppas;
602 rqd.ppa_list = ppa_list;
603 rqd.dma_ppa_list = dma_ppa_list;
604 rqd.end_io = pblk_end_io_sync;
608 rqd.flags = pblk_set_progr_mode(pblk, WRITE);
609 for (i = 0; i < rqd.nr_ppas; ) {
610 spin_lock(&line->lock);
611 paddr = __pblk_alloc_page(pblk, line, min);
612 spin_unlock(&line->lock);
613 for (j = 0; j < min; j++, i++, paddr++)
615 addr_to_gen_ppa(pblk, paddr, id);
618 for (i = 0; i < rqd.nr_ppas; ) {
619 struct ppa_addr ppa = addr_to_gen_ppa(pblk, paddr, id);
620 int pos = pblk_dev_ppa_to_pos(geo, ppa);
621 int read_type = PBLK_READ_RANDOM;
623 if (pblk_io_aligned(pblk, rq_ppas))
624 read_type = PBLK_READ_SEQUENTIAL;
625 rqd.flags = pblk_set_read_mode(pblk, read_type);
627 while (test_bit(pos, line->blk_bitmap)) {
629 if (pblk_boundary_paddr_checks(pblk, paddr)) {
630 pr_err("pblk: corrupt emeta line:%d\n",
637 ppa = addr_to_gen_ppa(pblk, paddr, id);
638 pos = pblk_dev_ppa_to_pos(geo, ppa);
641 if (pblk_boundary_paddr_checks(pblk, paddr + min)) {
642 pr_err("pblk: corrupt emeta line:%d\n",
649 for (j = 0; j < min; j++, i++, paddr++)
651 addr_to_gen_ppa(pblk, paddr, line->id);
655 ret = pblk_submit_io(pblk, &rqd);
657 pr_err("pblk: emeta I/O submission failed: %d\n", ret);
662 if (!wait_for_completion_io_timeout(&wait,
663 msecs_to_jiffies(PBLK_COMMAND_TIMEOUT_MS))) {
664 pr_err("pblk: emeta I/O timed out\n");
666 reinit_completion(&wait);
672 pblk_log_write_err(pblk, &rqd);
674 pblk_log_read_err(pblk, &rqd);
678 left_ppas -= rq_ppas;
682 nvm_dev_dma_free(dev->parent, ppa_list, dma_ppa_list);
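/* smeta is stored at the beginning of the first good block of the line;
 * return its start sector.
 */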
686 u64 pblk_line_smeta_start(struct pblk *pblk, struct pblk_line *line)
688 struct nvm_tgt_dev *dev = pblk->dev;
689 struct nvm_geo *geo = &dev->geo;
690 struct pblk_line_meta *lm = &pblk->lm;
693 /* This usually only happens on bad lines */
694 bit = find_first_zero_bit(line->blk_bitmap, lm->blk_per_line);
695 if (bit >= lm->blk_per_line)
698 return bit * geo->sec_per_pl;
701 static int pblk_line_submit_smeta_io(struct pblk *pblk, struct pblk_line *line,
704 struct nvm_tgt_dev *dev = pblk->dev;
705 struct pblk_line_meta *lm = &pblk->lm;
708 __le64 *lba_list = NULL;
712 DECLARE_COMPLETION_ONSTACK(wait);
715 bio_op = REQ_OP_WRITE;
716 cmd_op = NVM_OP_PWRITE;
717 flags = pblk_set_progr_mode(pblk, WRITE);
718 lba_list = emeta_to_lbas(pblk, line->emeta->buf);
719 } else if (dir == READ) {
720 bio_op = REQ_OP_READ;
721 cmd_op = NVM_OP_PREAD;
722 flags = pblk_set_read_mode(pblk, PBLK_READ_SEQUENTIAL);
726 memset(&rqd, 0, sizeof(struct nvm_rq));
728 rqd.ppa_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
733 bio = bio_map_kern(dev->q, line->smeta, lm->smeta_len, GFP_KERNEL);
739 bio->bi_iter.bi_sector = 0; /* internal bio */
740 bio_set_op_attrs(bio, bio_op, 0);
745 rqd.nr_ppas = lm->smeta_sec;
746 rqd.end_io = pblk_end_io_sync;
749 for (i = 0; i < lm->smeta_sec; i++, paddr++) {
750 rqd.ppa_list[i] = addr_to_gen_ppa(pblk, paddr, line->id);
752 lba_list[paddr] = cpu_to_le64(ADDR_EMPTY);
756 * This I/O is sent by the write thread when a line is replaced. Since
757 * the write thread is the only one sending write and erase commands,
758 * there is no need to take the LUN semaphore.
760 ret = pblk_submit_io(pblk, &rqd);
762 pr_err("pblk: smeta I/O submission failed: %d\n", ret);
767 if (!wait_for_completion_io_timeout(&wait,
768 msecs_to_jiffies(PBLK_COMMAND_TIMEOUT_MS))) {
769 pr_err("pblk: smeta I/O timed out\n");
774 pblk_log_write_err(pblk, &rqd);
776 pblk_log_read_err(pblk, &rqd);
780 nvm_dev_dma_free(dev->parent, rqd.ppa_list, rqd.dma_ppa_list);
785 int pblk_line_read_smeta(struct pblk *pblk, struct pblk_line *line)
787 u64 bpaddr = pblk_line_smeta_start(pblk, line);
789 return pblk_line_submit_smeta_io(pblk, line, bpaddr, READ);
792 int pblk_line_read_emeta(struct pblk *pblk, struct pblk_line *line,
795 return pblk_line_submit_emeta_io(pblk, line, emeta_buf,
796 line->emeta_ssec, READ);
799 static void pblk_setup_e_rq(struct pblk *pblk, struct nvm_rq *rqd,
802 rqd->opcode = NVM_OP_ERASE;
805 rqd->flags = pblk_set_progr_mode(pblk, ERASE);
809 static int pblk_blk_erase_sync(struct pblk *pblk, struct ppa_addr ppa)
813 DECLARE_COMPLETION_ONSTACK(wait);
815 memset(&rqd, 0, sizeof(struct nvm_rq));
817 pblk_setup_e_rq(pblk, &rqd, ppa);
819 rqd.end_io = pblk_end_io_sync;
822 /* The write thread schedules erases so that it minimizes disturbances
823 * with writes. Thus, there is no need to take the LUN semaphore.
825 ret = pblk_submit_io(pblk, &rqd);
827 struct nvm_tgt_dev *dev = pblk->dev;
828 struct nvm_geo *geo = &dev->geo;
830 pr_err("pblk: could not sync erase line:%d,blk:%d\n",
831 pblk_dev_ppa_to_line(ppa),
832 pblk_dev_ppa_to_pos(geo, ppa));
838 if (!wait_for_completion_io_timeout(&wait,
839 msecs_to_jiffies(PBLK_COMMAND_TIMEOUT_MS))) {
840 pr_err("pblk: sync erase timed out\n");
845 __pblk_end_io_erase(pblk, &rqd);
850 int pblk_line_erase(struct pblk *pblk, struct pblk_line *line)
852 struct pblk_line_meta *lm = &pblk->lm;
856 /* Erase only good blocks, one at a time */
858 spin_lock(&line->lock);
859 bit = find_next_zero_bit(line->erase_bitmap, lm->blk_per_line,
861 if (bit >= lm->blk_per_line) {
862 spin_unlock(&line->lock);
866 ppa = pblk->luns[bit].bppa; /* set ch and lun */
867 ppa.g.blk = line->id;
869 atomic_dec(&line->left_eblks);
870 WARN_ON(test_and_set_bit(bit, line->erase_bitmap));
871 spin_unlock(&line->lock);
873 if (pblk_blk_erase_sync(pblk, ppa)) {
874 pr_err("pblk: failed to erase line %d\n", line->id);
882 static void pblk_line_setup_metadata(struct pblk_line *line,
883 struct pblk_line_mgmt *l_mg,
884 struct pblk_line_meta *lm)
889 meta_line = find_first_zero_bit(&l_mg->meta_bitmap, PBLK_DATA_LINES);
890 if (meta_line == PBLK_DATA_LINES) {
891 spin_unlock(&l_mg->free_lock);
893 spin_lock(&l_mg->free_lock);
897 set_bit(meta_line, &l_mg->meta_bitmap);
898 line->meta_line = meta_line;
900 line->smeta = l_mg->sline_meta[meta_line];
901 line->emeta = l_mg->eline_meta[meta_line];
903 memset(line->smeta, 0, lm->smeta_len);
904 memset(line->emeta->buf, 0, lm->emeta_len[0]);
906 line->emeta->mem = 0;
907 atomic_set(&line->emeta->sync, 0);
910 /* For now, lines are always assumed to be full lines. Thus, smeta former and current
911 * lun bitmaps are omitted.
913 static int pblk_line_init_metadata(struct pblk *pblk, struct pblk_line *line,
914 struct pblk_line *cur)
916 struct nvm_tgt_dev *dev = pblk->dev;
917 struct nvm_geo *geo = &dev->geo;
918 struct pblk_line_meta *lm = &pblk->lm;
919 struct pblk_line_mgmt *l_mg = &pblk->l_mg;
920 struct pblk_emeta *emeta = line->emeta;
921 struct line_emeta *emeta_buf = emeta->buf;
922 struct line_smeta *smeta_buf = (struct line_smeta *)line->smeta;
925 /* After erasing the line, new bad blocks might appear and we risk
926 * having an invalid line
928 nr_blk_line = lm->blk_per_line -
929 bitmap_weight(line->blk_bitmap, lm->blk_per_line);
930 if (nr_blk_line < lm->min_blk_line) {
931 spin_lock(&l_mg->free_lock);
932 spin_lock(&line->lock);
933 line->state = PBLK_LINESTATE_BAD;
934 spin_unlock(&line->lock);
936 list_add_tail(&line->list, &l_mg->bad_list);
937 spin_unlock(&l_mg->free_lock);
939 pr_debug("pblk: line %d is bad\n", line->id);
944 /* Run-time metadata */
945 line->lun_bitmap = ((void *)(smeta_buf)) + sizeof(struct line_smeta);
947 /* Mark LUNs allocated in this line (all for now) */
948 bitmap_set(line->lun_bitmap, 0, lm->lun_bitmap_len);
950 smeta_buf->header.identifier = cpu_to_le32(PBLK_MAGIC);
951 memcpy(smeta_buf->header.uuid, pblk->instance_uuid, 16);
952 smeta_buf->header.id = cpu_to_le32(line->id);
953 smeta_buf->header.type = cpu_to_le16(line->type);
954 smeta_buf->header.version = cpu_to_le16(1);
957 smeta_buf->seq_nr = cpu_to_le64(line->seq_nr);
958 smeta_buf->window_wr_lun = cpu_to_le32(geo->nr_luns);
960 /* Fill metadata among lines */
962 memcpy(line->lun_bitmap, cur->lun_bitmap, lm->lun_bitmap_len);
963 smeta_buf->prev_id = cpu_to_le32(cur->id);
964 cur->emeta->buf->next_id = cpu_to_le32(line->id);
966 smeta_buf->prev_id = cpu_to_le32(PBLK_LINE_EMPTY);
969 /* All smeta must be set at this point */
970 smeta_buf->header.crc = cpu_to_le32(
971 pblk_calc_meta_header_crc(pblk, &smeta_buf->header));
972 smeta_buf->crc = cpu_to_le32(pblk_calc_smeta_crc(pblk, smeta_buf));
975 memcpy(&emeta_buf->header, &smeta_buf->header,
976 sizeof(struct line_header));
977 emeta_buf->seq_nr = cpu_to_le64(line->seq_nr);
978 emeta_buf->nr_lbas = cpu_to_le64(line->sec_in_line);
979 emeta_buf->nr_valid_lbas = cpu_to_le64(0);
980 emeta_buf->next_id = cpu_to_le32(PBLK_LINE_EMPTY);
981 emeta_buf->crc = cpu_to_le32(0);
982 emeta_buf->prev_id = smeta_buf->prev_id;
987 /* For now, lines are always assumed to be full lines. Thus, smeta former and current
988 * lun bitmaps are omitted.
990 static int pblk_line_init_bb(struct pblk *pblk, struct pblk_line *line,
993 struct nvm_tgt_dev *dev = pblk->dev;
994 struct nvm_geo *geo = &dev->geo;
995 struct pblk_line_meta *lm = &pblk->lm;
996 struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1001 line->sec_in_line = lm->sec_per_line;
1003 /* Capture bad block information on line mapping bitmaps */
1004 while ((bit = find_next_bit(line->blk_bitmap, lm->blk_per_line,
1005 bit + 1)) < lm->blk_per_line) {
1006 off = bit * geo->sec_per_pl;
1007 bitmap_shift_left(l_mg->bb_aux, l_mg->bb_template, off,
1009 bitmap_or(line->map_bitmap, line->map_bitmap, l_mg->bb_aux,
1011 line->sec_in_line -= geo->sec_per_blk;
1012 if (bit >= lm->emeta_bb)
1016 /* Mark smeta metadata sectors as bad sectors */
1017 bit = find_first_zero_bit(line->blk_bitmap, lm->blk_per_line);
1018 off = bit * geo->sec_per_pl;
1020 bitmap_set(line->map_bitmap, off, lm->smeta_sec);
1021 line->sec_in_line -= lm->smeta_sec;
1022 line->smeta_ssec = off;
1023 line->cur_sec = off + lm->smeta_sec;
1025 if (init && pblk_line_submit_smeta_io(pblk, line, off, WRITE)) {
1026 pr_debug("pblk: line smeta I/O failed. Retry\n");
1027 off += geo->sec_per_pl;
1031 bitmap_copy(line->invalid_bitmap, line->map_bitmap, lm->sec_per_line);
1033 /* Mark emeta metadata sectors as bad sectors. We need to consider bad
1034 * blocks to make sure that there are enough sectors to store emeta
1036 bit = lm->sec_per_line;
1037 off = lm->sec_per_line - lm->emeta_sec[0];
1038 bitmap_set(line->invalid_bitmap, off, lm->emeta_sec[0]);
1040 off -= geo->sec_per_pl;
1041 if (!test_bit(off, line->invalid_bitmap)) {
1042 bitmap_set(line->invalid_bitmap, off, geo->sec_per_pl);
1047 line->sec_in_line -= lm->emeta_sec[0];
1048 line->emeta_ssec = off;
1049 line->nr_valid_lbas = 0;
1050 line->left_msecs = line->sec_in_line;
1051 *line->vsc = cpu_to_le32(line->sec_in_line);
1053 if (lm->sec_per_line - line->sec_in_line !=
1054 bitmap_weight(line->invalid_bitmap, lm->sec_per_line)) {
1055 spin_lock(&line->lock);
1056 line->state = PBLK_LINESTATE_BAD;
1057 spin_unlock(&line->lock);
1059 list_add_tail(&line->list, &l_mg->bad_list);
1060 pr_err("pblk: unexpected line %d is bad\n", line->id);
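/* Prepare a free line for writing: allocate its map and invalid bitmaps,
 * transition it to the open state and set up the erase counters.
 */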
1068 static int pblk_line_prepare(struct pblk *pblk, struct pblk_line *line)
1070 struct pblk_line_meta *lm = &pblk->lm;
1071 int blk_in_line = atomic_read(&line->blk_in_line);
1073 line->map_bitmap = mempool_alloc(pblk->line_meta_pool, GFP_ATOMIC);
1074 if (!line->map_bitmap)
1076 memset(line->map_bitmap, 0, lm->sec_bitmap_len);
1078 /* invalid_bitmap is special since it is used when the line is closed. No
1079 * need to zero it; it will be initialized using bb info from
1082 line->invalid_bitmap = mempool_alloc(pblk->line_meta_pool, GFP_ATOMIC);
1083 if (!line->invalid_bitmap) {
1084 mempool_free(line->map_bitmap, pblk->line_meta_pool);
1088 spin_lock(&line->lock);
1089 if (line->state != PBLK_LINESTATE_FREE) {
1090 spin_unlock(&line->lock);
1091 WARN(1, "pblk: corrupted line state\n");
1094 line->state = PBLK_LINESTATE_OPEN;
1096 atomic_set(&line->left_eblks, blk_in_line);
1097 atomic_set(&line->left_seblks, blk_in_line);
1099 line->meta_distance = lm->meta_distance;
1100 spin_unlock(&line->lock);
1102 /* Bad blocks do not need to be erased */
1103 bitmap_copy(line->erase_bitmap, line->blk_bitmap, lm->blk_per_line);
1105 kref_init(&line->ref);
1110 int pblk_line_recov_alloc(struct pblk *pblk, struct pblk_line *line)
1112 struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1115 spin_lock(&l_mg->free_lock);
1116 l_mg->data_line = line;
1117 list_del(&line->list);
1119 ret = pblk_line_prepare(pblk, line);
1121 list_add(&line->list, &l_mg->free_list);
1122 spin_unlock(&l_mg->free_lock);
1125 spin_unlock(&l_mg->free_lock);
1127 pblk_rl_free_lines_dec(&pblk->rl, line);
1129 if (!pblk_line_init_bb(pblk, line, 0)) {
1130 list_add(&line->list, &l_mg->free_list);
1137 void pblk_line_recov_close(struct pblk *pblk, struct pblk_line *line)
1139 mempool_free(line->map_bitmap, pblk->line_meta_pool);
1140 line->map_bitmap = NULL;
1145 struct pblk_line *pblk_line_get(struct pblk *pblk)
1147 struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1148 struct pblk_line_meta *lm = &pblk->lm;
1149 struct pblk_line *line = NULL;
1152 lockdep_assert_held(&l_mg->free_lock);
1155 if (list_empty(&l_mg->free_list)) {
1156 pr_err("pblk: no free lines\n");
1160 line = list_first_entry(&l_mg->free_list, struct pblk_line, list);
1161 list_del(&line->list);
1162 l_mg->nr_free_lines--;
1164 bit = find_first_zero_bit(line->blk_bitmap, lm->blk_per_line);
1165 if (unlikely(bit >= lm->blk_per_line)) {
1166 spin_lock(&line->lock);
1167 line->state = PBLK_LINESTATE_BAD;
1168 spin_unlock(&line->lock);
1170 list_add_tail(&line->list, &l_mg->bad_list);
1172 pr_debug("pblk: line %d is bad\n", line->id);
1176 if (pblk_line_prepare(pblk, line)) {
1177 pr_err("pblk: failed to prepare line %d\n", line->id);
1178 list_add(&line->list, &l_mg->free_list);
1186 static struct pblk_line *pblk_line_retry(struct pblk *pblk,
1187 struct pblk_line *line)
1189 struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1190 struct pblk_line *retry_line;
1192 spin_lock(&l_mg->free_lock);
1193 retry_line = pblk_line_get(pblk);
1195 l_mg->data_line = NULL;
1196 spin_unlock(&l_mg->free_lock);
1200 retry_line->smeta = line->smeta;
1201 retry_line->emeta = line->emeta;
1202 retry_line->meta_line = line->meta_line;
1204 pblk_line_free(pblk, line);
1205 l_mg->data_line = retry_line;
1206 spin_unlock(&l_mg->free_lock);
1208 if (pblk_line_erase(pblk, retry_line)) {
1209 spin_lock(&l_mg->free_lock);
1210 l_mg->data_line = NULL;
1211 spin_unlock(&l_mg->free_lock);
1215 pblk_rl_free_lines_dec(&pblk->rl, retry_line);
1220 struct pblk_line *pblk_line_get_first_data(struct pblk *pblk)
1222 struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1223 struct pblk_line *line;
1226 spin_lock(&l_mg->free_lock);
1227 line = pblk_line_get(pblk);
1229 spin_unlock(&l_mg->free_lock);
1233 line->seq_nr = l_mg->d_seq_nr++;
1234 line->type = PBLK_LINETYPE_DATA;
1235 l_mg->data_line = line;
1237 pblk_line_setup_metadata(line, l_mg, &pblk->lm);
1239 /* Allocate next line for preparation */
1240 l_mg->data_next = pblk_line_get(pblk);
1241 if (l_mg->data_next) {
1242 l_mg->data_next->seq_nr = l_mg->d_seq_nr++;
1243 l_mg->data_next->type = PBLK_LINETYPE_DATA;
1246 spin_unlock(&l_mg->free_lock);
1248 pblk_rl_free_lines_dec(&pblk->rl, line);
1250 pblk_rl_free_lines_dec(&pblk->rl, l_mg->data_next);
1252 if (pblk_line_erase(pblk, line))
1256 if (!pblk_line_init_metadata(pblk, line, NULL)) {
1257 line = pblk_line_retry(pblk, line);
1264 if (!pblk_line_init_bb(pblk, line, 1)) {
1265 line = pblk_line_retry(pblk, line);
1275 struct pblk_line *pblk_line_replace_data(struct pblk *pblk)
1277 struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1278 struct pblk_line *cur, *new;
1279 unsigned int left_seblks;
1282 cur = l_mg->data_line;
1283 new = l_mg->data_next;
1286 l_mg->data_line = new;
1289 left_seblks = atomic_read(&new->left_seblks);
1291 /* If line is not fully erased, erase it */
1292 if (atomic_read(&new->left_eblks)) {
1293 if (pblk_line_erase(pblk, new))
1301 spin_lock(&l_mg->free_lock);
1302 /* Allocate next line for preparation */
1303 l_mg->data_next = pblk_line_get(pblk);
1304 if (l_mg->data_next) {
1305 l_mg->data_next->seq_nr = l_mg->d_seq_nr++;
1306 l_mg->data_next->type = PBLK_LINETYPE_DATA;
1310 pblk_line_setup_metadata(new, l_mg, &pblk->lm);
1311 spin_unlock(&l_mg->free_lock);
1314 pblk_rl_free_lines_dec(&pblk->rl, l_mg->data_next);
1317 if (!pblk_line_init_metadata(pblk, new, cur)) {
1318 new = pblk_line_retry(pblk, new);
1325 if (!pblk_line_init_bb(pblk, new, 1)) {
1326 new = pblk_line_retry(pblk, new);
1336 void pblk_line_free(struct pblk *pblk, struct pblk_line *line)
1338 if (line->map_bitmap)
1339 mempool_free(line->map_bitmap, pblk->line_meta_pool);
1340 if (line->invalid_bitmap)
1341 mempool_free(line->invalid_bitmap, pblk->line_meta_pool);
1343 *line->vsc = cpu_to_le32(EMPTY_ENTRY);
1345 line->map_bitmap = NULL;
1346 line->invalid_bitmap = NULL;
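/* Last reference to a line under GC has been dropped: release its resources
 * and return it to the free list, crediting the rate limiter.
 */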
1351 void pblk_line_put(struct kref *ref)
1353 struct pblk_line *line = container_of(ref, struct pblk_line, ref);
1354 struct pblk *pblk = line->pblk;
1355 struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1357 spin_lock(&line->lock);
1358 WARN_ON(line->state != PBLK_LINESTATE_GC);
1359 line->state = PBLK_LINESTATE_FREE;
1360 line->gc_group = PBLK_LINEGC_NONE;
1361 pblk_line_free(pblk, line);
1362 spin_unlock(&line->lock);
1364 spin_lock(&l_mg->free_lock);
1365 list_add_tail(&line->list, &l_mg->free_list);
1366 l_mg->nr_free_lines++;
1367 spin_unlock(&l_mg->free_lock);
1369 pblk_rl_free_lines_inc(&pblk->rl, line);
1372 int pblk_blk_erase_async(struct pblk *pblk, struct ppa_addr ppa)
1377 rqd = mempool_alloc(pblk->g_rq_pool, GFP_KERNEL);
1378 memset(rqd, 0, pblk_g_rq_size);
1380 pblk_setup_e_rq(pblk, rqd, ppa);
1382 rqd->end_io = pblk_end_io_erase;
1383 rqd->private = pblk;
1385 /* The write thread schedules erases so that it minimizes disturbances
1386 * with writes. Thus, there is no need to take the LUN semaphore.
1388 err = pblk_submit_io(pblk, rqd);
1390 struct nvm_tgt_dev *dev = pblk->dev;
1391 struct nvm_geo *geo = &dev->geo;
1393 pr_err("pblk: could not async erase line:%d,blk:%d\n",
1394 pblk_dev_ppa_to_line(ppa),
1395 pblk_dev_ppa_to_pos(geo, ppa));
1401 struct pblk_line *pblk_line_get_data(struct pblk *pblk)
1403 return pblk->l_mg.data_line;
1406 /* For now, always erase the next line */
1407 struct pblk_line *pblk_line_get_erase(struct pblk *pblk)
1409 return pblk->l_mg.data_next;
1412 int pblk_line_is_full(struct pblk_line *line)
1414 return (line->left_msecs == 0);
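/* Close a fully written line: release its meta line slot, move it to the
 * closed state and place it on the GC list matching its valid sector count.
 */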
1417 void pblk_line_close(struct pblk *pblk, struct pblk_line *line)
1419 struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1420 struct pblk_line_meta *lm = &pblk->lm;
1421 struct list_head *move_list;
1423 WARN(!bitmap_full(line->map_bitmap, lm->sec_per_line),
1424 "pblk: corrupt closed line %d\n", line->id);
1426 spin_lock(&l_mg->free_lock);
1427 WARN_ON(!test_and_clear_bit(line->meta_line, &l_mg->meta_bitmap));
1428 spin_unlock(&l_mg->free_lock);
1430 spin_lock(&l_mg->gc_lock);
1431 spin_lock(&line->lock);
1432 WARN_ON(line->state != PBLK_LINESTATE_OPEN);
1433 line->state = PBLK_LINESTATE_CLOSED;
1434 move_list = pblk_line_gc_list(pblk, line);
1436 list_add_tail(&line->list, move_list);
1438 mempool_free(line->map_bitmap, pblk->line_meta_pool);
1439 line->map_bitmap = NULL;
1443 spin_unlock(&line->lock);
1444 spin_unlock(&l_mg->gc_lock);
1447 void pblk_line_close_meta(struct pblk *pblk, struct pblk_line *line)
1449 struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1450 struct pblk_line_meta *lm = &pblk->lm;
1451 struct pblk_emeta *emeta = line->emeta;
1452 struct line_emeta *emeta_buf = emeta->buf;
1454 /* No need for an exact vsc value; avoid a big line lock and take an approximation. */
1455 memcpy(emeta_to_vsc(pblk, emeta_buf), l_mg->vsc_list, lm->vsc_list_len);
1456 memcpy(emeta_to_bb(emeta_buf), line->blk_bitmap, lm->blk_bitmap_len);
1458 emeta_buf->nr_valid_lbas = cpu_to_le64(line->nr_valid_lbas);
1459 emeta_buf->crc = cpu_to_le32(pblk_calc_emeta_crc(pblk, emeta_buf));
1461 spin_lock(&l_mg->close_lock);
1462 spin_lock(&line->lock);
1463 list_add_tail(&line->list, &l_mg->emeta_list);
1464 spin_unlock(&line->lock);
1465 spin_unlock(&l_mg->close_lock);
1468 void pblk_line_close_ws(struct work_struct *work)
1470 struct pblk_line_ws *line_ws = container_of(work, struct pblk_line_ws,
1472 struct pblk *pblk = line_ws->pblk;
1473 struct pblk_line *line = line_ws->line;
1475 pblk_line_close(pblk, line);
1476 mempool_free(line_ws, pblk->line_ws_pool);
1479 void pblk_line_mark_bb(struct work_struct *work)
1481 struct pblk_line_ws *line_ws = container_of(work, struct pblk_line_ws,
1483 struct pblk *pblk = line_ws->pblk;
1484 struct nvm_tgt_dev *dev = pblk->dev;
1485 struct ppa_addr *ppa = line_ws->priv;
1488 ret = nvm_set_tgt_bb_tbl(dev, ppa, 1, NVM_BLK_T_GRWN_BAD);
1490 struct pblk_line *line;
1493 line = &pblk->lines[pblk_dev_ppa_to_line(*ppa)];
1494 pos = pblk_dev_ppa_to_pos(&dev->geo, *ppa);
1496 pr_err("pblk: failed to mark bb, line:%d, pos:%d\n",
1501 mempool_free(line_ws, pblk->line_ws_pool);
1504 void pblk_line_run_ws(struct pblk *pblk, struct pblk_line *line, void *priv,
1505 void (*work)(struct work_struct *))
1507 struct pblk_line_ws *line_ws;
1509 line_ws = mempool_alloc(pblk->line_ws_pool, GFP_ATOMIC);
1513 line_ws->pblk = pblk;
1514 line_ws->line = line;
1515 line_ws->priv = priv;
1517 INIT_WORK(&line_ws->ws, work);
1518 queue_work(pblk->kw_wq, &line_ws->ws);
1521 void pblk_down_rq(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas,
1522 unsigned long *lun_bitmap)
1524 struct nvm_tgt_dev *dev = pblk->dev;
1525 struct nvm_geo *geo = &dev->geo;
1526 struct pblk_lun *rlun;
1527 int pos = pblk_ppa_to_pos(geo, ppa_list[0]);
1531 * Only send one inflight I/O per LUN. Since we map at a page
1532 * granularity, all ppas in the I/O will map to the same LUN
1534 #ifdef CONFIG_NVM_DEBUG
1537 for (i = 1; i < nr_ppas; i++)
1538 WARN_ON(ppa_list[0].g.lun != ppa_list[i].g.lun ||
1539 ppa_list[0].g.ch != ppa_list[i].g.ch);
1541 /* If the LUN has been locked for this same request, do not attempt to
1544 if (test_and_set_bit(pos, lun_bitmap))
1547 rlun = &pblk->luns[pos];
1548 ret = down_timeout(&rlun->wr_sem, msecs_to_jiffies(5000));
1552 pr_err("pblk: lun semaphore timed out\n");
1555 pr_err("pblk: lun semaphore timed out\n");
1561 void pblk_up_rq(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas,
1562 unsigned long *lun_bitmap)
1564 struct nvm_tgt_dev *dev = pblk->dev;
1565 struct nvm_geo *geo = &dev->geo;
1566 struct pblk_lun *rlun;
1567 int nr_luns = geo->nr_luns;
1570 while ((bit = find_next_bit(lun_bitmap, nr_luns, bit + 1)) < nr_luns) {
1571 rlun = &pblk->luns[bit];
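/* Update the L2P entry for an lba; if the lba was mapped to a device address,
 * invalidate that sector.
 */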
1578 void pblk_update_map(struct pblk *pblk, sector_t lba, struct ppa_addr ppa)
1580 struct ppa_addr l2p_ppa;
1582 /* logic error: lba out-of-bounds. Ignore update */
1583 if (!(lba < pblk->rl.nr_secs)) {
1584 WARN(1, "pblk: corrupted L2P map request\n");
1588 spin_lock(&pblk->trans_lock);
1589 l2p_ppa = pblk_trans_map_get(pblk, lba);
1591 if (!pblk_addr_in_cache(l2p_ppa) && !pblk_ppa_empty(l2p_ppa))
1592 pblk_map_invalidate(pblk, l2p_ppa);
1594 pblk_trans_map_set(pblk, lba, ppa);
1595 spin_unlock(&pblk->trans_lock);
1598 void pblk_update_map_cache(struct pblk *pblk, sector_t lba, struct ppa_addr ppa)
1600 #ifdef CONFIG_NVM_DEBUG
1601 /* Callers must ensure that the ppa points to a cache address */
1602 BUG_ON(!pblk_addr_in_cache(ppa));
1603 BUG_ON(pblk_rb_pos_oob(&pblk->rwb, pblk_addr_to_cacheline(ppa)));
1606 pblk_update_map(pblk, lba, ppa);
1609 int pblk_update_map_gc(struct pblk *pblk, sector_t lba, struct ppa_addr ppa,
1610 struct pblk_line *gc_line)
1612 struct ppa_addr l2p_ppa;
1615 #ifdef CONFIG_NVM_DEBUG
1616 /* Callers must ensure that the ppa points to a cache address */
1617 BUG_ON(!pblk_addr_in_cache(ppa));
1618 BUG_ON(pblk_rb_pos_oob(&pblk->rwb, pblk_addr_to_cacheline(ppa)));
1621 /* logic error: lba out-of-bounds. Ignore update */
1622 if (!(lba < pblk->rl.nr_secs)) {
1623 WARN(1, "pblk: corrupted L2P map request\n");
1627 spin_lock(&pblk->trans_lock);
1628 l2p_ppa = pblk_trans_map_get(pblk, lba);
1630 /* Prevent updated entries from being overwritten by GC */
1631 if (pblk_addr_in_cache(l2p_ppa) || pblk_ppa_empty(l2p_ppa) ||
1632 pblk_tgt_ppa_to_line(l2p_ppa) != gc_line->id) {
1637 pblk_trans_map_set(pblk, lba, ppa);
1639 spin_unlock(&pblk->trans_lock);
1643 void pblk_update_map_dev(struct pblk *pblk, sector_t lba, struct ppa_addr ppa,
1644 struct ppa_addr entry_line)
1646 struct ppa_addr l2p_line;
1648 #ifdef CONFIG_NVM_DEBUG
1649 /* Callers must ensure that the ppa points to a device address */
1650 BUG_ON(pblk_addr_in_cache(ppa));
1652 /* Invalidate and discard padded entries */
1653 if (lba == ADDR_EMPTY) {
1654 #ifdef CONFIG_NVM_DEBUG
1655 atomic_long_inc(&pblk->padded_wb);
1657 pblk_map_invalidate(pblk, ppa);
1661 /* logic error: lba out-of-bounds. Ignore update */
1662 if (!(lba < pblk->rl.nr_secs)) {
1663 WARN(1, "pblk: corrupted L2P map request\n");
1667 spin_lock(&pblk->trans_lock);
1668 l2p_line = pblk_trans_map_get(pblk, lba);
1670 /* Do not update L2P if the cacheline has been updated. In this case,
1671 * the mapped ppa must be invalidated
1673 if (l2p_line.ppa != entry_line.ppa) {
1674 if (!pblk_ppa_empty(ppa))
1675 pblk_map_invalidate(pblk, ppa);
1679 #ifdef CONFIG_NVM_DEBUG
1680 WARN_ON(!pblk_addr_in_cache(l2p_line) && !pblk_ppa_empty(l2p_line));
1683 pblk_trans_map_set(pblk, lba, ppa);
1685 spin_unlock(&pblk->trans_lock);
1688 void pblk_lookup_l2p_seq(struct pblk *pblk, struct ppa_addr *ppas,
1689 sector_t blba, int nr_secs)
1693 spin_lock(&pblk->trans_lock);
1694 for (i = 0; i < nr_secs; i++)
1695 ppas[i] = pblk_trans_map_get(pblk, blba + i);
1696 spin_unlock(&pblk->trans_lock);
1699 void pblk_lookup_l2p_rand(struct pblk *pblk, struct ppa_addr *ppas,
1700 u64 *lba_list, int nr_secs)
1705 spin_lock(&pblk->trans_lock);
1706 for (i = 0; i < nr_secs; i++) {
1708 if (lba == ADDR_EMPTY) {
1709 ppas[i].ppa = ADDR_EMPTY;
1711 /* logic error: lba out-of-bounds. Ignore update */
1712 if (!(lba < pblk->rl.nr_secs)) {
1713 WARN(1, "pblk: corrupted L2P map request\n");
1716 ppas[i] = pblk_trans_map_get(pblk, lba);
1719 spin_unlock(&pblk->trans_lock);