/*
 * Copyright (C) 2016 CNEX Labs
 * Initial release: Javier Gonzalez <javier@cnexlabs.com>
 *                  Matias Bjorling <matias@cnexlabs.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * pblk-write.c - pblk's write path from write buffer to media
 */

#include "pblk.h"

static void pblk_sync_line(struct pblk *pblk, struct pblk_line *line)
{
#ifdef CONFIG_NVM_DEBUG
        atomic_long_inc(&pblk->sync_writes);
#endif

        /* Counter protected by rb sync lock */
        line->left_ssecs--;
}
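
/* Complete the write-buffer entries covered by a finished write request:
 * end all user bios chained to each valid entry, account the entries as
 * synced on their line, advance the sync pointer and release the request.
 * Returns the new sync position in the ring buffer.
 */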
static unsigned long pblk_end_w_bio(struct pblk *pblk, struct nvm_rq *rqd,
                                    struct pblk_c_ctx *c_ctx)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct bio *original_bio;
        unsigned long ret;
        int i;

        for (i = 0; i < c_ctx->nr_valid; i++) {
                struct pblk_w_ctx *w_ctx;
                struct ppa_addr p;
                struct pblk_line *line;

                w_ctx = pblk_rb_w_ctx(&pblk->rwb, c_ctx->sentry + i);

                p = rqd->ppa_list[i];
                line = &pblk->lines[pblk_dev_ppa_to_line(p)];
                pblk_sync_line(pblk, line);

                while ((original_bio = bio_list_pop(&w_ctx->bios)))
                        bio_endio(original_bio);
        }

#ifdef CONFIG_NVM_DEBUG
        atomic_long_add(c_ctx->nr_valid, &pblk->compl_writes);
#endif

        ret = pblk_rb_sync_advance(&pblk->rwb, c_ctx->nr_valid);

        nvm_dev_dma_free(dev->parent, rqd->meta_list, rqd->dma_meta_list);

        bio_put(rqd->bio);
        pblk_free_rqd(pblk, rqd, WRITE);

        return ret;
}

static unsigned long pblk_end_queued_w_bio(struct pblk *pblk,
                                           struct nvm_rq *rqd,
                                           struct pblk_c_ctx *c_ctx)
{
        list_del(&c_ctx->list);
        return pblk_end_w_bio(pblk, rqd, c_ctx);
}
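
/* Completions must be processed in ring-buffer order. If this request does
 * not complete at the current sync position, park it on pblk->compl_list;
 * otherwise complete it, together with any queued requests that now line up
 * behind it.
 */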
static void pblk_complete_write(struct pblk *pblk, struct nvm_rq *rqd,
                                struct pblk_c_ctx *c_ctx)
{
        struct pblk_c_ctx *c, *r;
        unsigned long flags;
        unsigned long pos;

#ifdef CONFIG_NVM_DEBUG
        atomic_long_sub(c_ctx->nr_valid, &pblk->inflight_writes);
#endif

        pblk_up_rq(pblk, rqd->ppa_list, rqd->nr_ppas, c_ctx->lun_bitmap);

        pos = pblk_rb_sync_init(&pblk->rwb, &flags);
        if (pos == c_ctx->sentry) {
                pos = pblk_end_w_bio(pblk, rqd, c_ctx);

retry:
                list_for_each_entry_safe(c, r, &pblk->compl_list, list) {
                        rqd = nvm_rq_from_c_ctx(c);
                        if (c->sentry == pos) {
                                pos = pblk_end_queued_w_bio(pblk, rqd, c);
                                goto retry;
                        }
                }
        } else {
                WARN_ON(nvm_rq_from_c_ctx(c_ctx) != rqd);
                list_add_tail(&c_ctx->list, &pblk->compl_list);
        }
        pblk_rb_sync_end(&pblk->rwb, &flags);
}

/* When a write fails, we are not sure whether the block has grown bad or a page
 * range is more susceptible to write errors. If a high number of pages fail, we
 * assume that the block is bad and we mark it accordingly. In all cases, we
 * remap and resubmit the failed entries as fast as possible; if a flush is
 * waiting on a completion, the whole stack would stall otherwise.
 */
static void pblk_end_w_fail(struct pblk *pblk, struct nvm_rq *rqd)
{
        void *comp_bits = &rqd->ppa_status;
        struct pblk_c_ctx *c_ctx = nvm_rq_to_pdu(rqd);
        struct pblk_rec_ctx *recovery;
        struct ppa_addr *ppa_list = rqd->ppa_list;
        int nr_ppas = rqd->nr_ppas;
        unsigned int c_entries;
        int bit, ret;

        if (unlikely(nr_ppas == 1))
                ppa_list = &rqd->ppa_addr;

        recovery = mempool_alloc(pblk->rec_pool, GFP_ATOMIC);
        if (!recovery) {
                pr_err("pblk: could not allocate recovery context\n");
                return;
        }
        INIT_LIST_HEAD(&recovery->failed);

        bit = -1;
        while ((bit = find_next_bit(comp_bits, nr_ppas, bit + 1)) < nr_ppas) {
                struct pblk_rb_entry *entry;
                struct ppa_addr ppa;

                if (bit > c_ctx->nr_valid) {
                        WARN_ONCE(1, "pblk: corrupted write request\n");
                        mempool_free(recovery, pblk->rec_pool);
                        goto out;
                }

                ppa = ppa_list[bit];
                entry = pblk_rb_sync_scan_entry(&pblk->rwb, &ppa);
                if (!entry) {
                        pr_err("pblk: could not scan entry on write failure\n");
                        mempool_free(recovery, pblk->rec_pool);
                        goto out;
                }

                /* The list is filled first and emptied afterwards. No need for
                 * protecting it with a lock
                 */
                list_add_tail(&entry->index, &recovery->failed);
        }

        c_entries = find_first_bit(comp_bits, nr_ppas);
        ret = pblk_recov_setup_rq(pblk, c_ctx, recovery, comp_bits, c_entries);
        if (ret) {
                pr_err("pblk: could not recover from write failure\n");
                mempool_free(recovery, pblk->rec_pool);
                goto out;
        }

        INIT_WORK(&recovery->ws_rec, pblk_submit_rec);
        queue_work(pblk->kw_wq, &recovery->ws_rec);

out:
        pblk_complete_write(pblk, rqd, c_ctx);
}
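
/* End I/O callback for user data writes */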
static void pblk_end_io_write(struct nvm_rq *rqd)
{
        struct pblk *pblk = rqd->private;
        struct pblk_c_ctx *c_ctx = nvm_rq_to_pdu(rqd);

        if (rqd->error) {
                pblk_log_write_err(pblk, rqd);
                return pblk_end_w_fail(pblk, rqd);
        }
#ifdef CONFIG_NVM_DEBUG
        else
                WARN_ONCE(rqd->bio->bi_status, "pblk: corrupted write error\n");
#endif

        pblk_complete_write(pblk, rqd, c_ctx);
}
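
/* End I/O callback for emeta (line metadata) writes: release the LUN
 * write semaphore taken at submission time and close the line once all
 * of its emeta sectors have been persisted.
 */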
static void pblk_end_io_write_meta(struct nvm_rq *rqd)
{
        struct pblk *pblk = rqd->private;
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        struct pblk_g_ctx *m_ctx = nvm_rq_to_pdu(rqd);
        struct pblk_line *line = m_ctx->private;
        struct pblk_emeta *emeta = line->emeta;
        int pos = pblk_ppa_to_pos(geo, rqd->ppa_list[0]);
        struct pblk_lun *rlun = &pblk->luns[pos];
        int sync;

        up(&rlun->wr_sem);

        if (rqd->error) {
                pblk_log_write_err(pblk, rqd);
                pr_err("pblk: metadata I/O failed\n");
        }
#ifdef CONFIG_NVM_DEBUG
        else
                WARN_ONCE(rqd->bio->bi_status, "pblk: corrupted write error\n");
#endif

        sync = atomic_add_return(rqd->nr_ppas, &emeta->sync);
        if (sync == emeta->nr_entries)
                pblk_line_run_ws(pblk, line, NULL, pblk_line_close_ws);

        bio_put(rqd->bio);
        pblk_free_rqd(pblk, rqd, READ);
}
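
/* Initialize the common fields of a write request and allocate the DMA
 * region for its out-of-band metadata (plus the PPA list for multi-sector
 * requests).
 */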
static int pblk_alloc_w_rq(struct pblk *pblk, struct nvm_rq *rqd,
                           unsigned int nr_secs,
                           nvm_end_io_fn(*end_io))
{
        struct nvm_tgt_dev *dev = pblk->dev;

        /* Setup write request */
        rqd->opcode = NVM_OP_PWRITE;
        rqd->nr_ppas = nr_secs;
        rqd->flags = pblk_set_progr_mode(pblk, WRITE);
        rqd->private = pblk;
        rqd->end_io = end_io;

        rqd->meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
                                                        &rqd->dma_meta_list);
        if (!rqd->meta_list)
                return -ENOMEM;

        if (unlikely(nr_secs == 1))
                return 0;

        rqd->ppa_list = rqd->meta_list + pblk_dma_meta_size;
        rqd->dma_ppa_list = rqd->dma_meta_list + pblk_dma_meta_size;

        return 0;
}
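
/* Map a write request onto physical addresses. If blocks on the next data
 * line are still pending erase, map through pblk_map_erase_rq() so an erase
 * can be scheduled alongside the write.
 */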
static int pblk_setup_w_rq(struct pblk *pblk, struct nvm_rq *rqd,
                           struct pblk_c_ctx *c_ctx, struct ppa_addr *erase_ppa)
{
        struct pblk_line_meta *lm = &pblk->lm;
        struct pblk_line *e_line = pblk_line_get_erase(pblk);
        unsigned int valid = c_ctx->nr_valid;
        unsigned int padded = c_ctx->nr_padded;
        unsigned int nr_secs = valid + padded;
        unsigned long *lun_bitmap;
        int ret;

        lun_bitmap = kzalloc(lm->lun_bitmap_len, GFP_KERNEL);
        if (!lun_bitmap)
                return -ENOMEM;
        c_ctx->lun_bitmap = lun_bitmap;

        ret = pblk_alloc_w_rq(pblk, rqd, nr_secs, pblk_end_io_write);
        if (ret) {
                kfree(lun_bitmap);
                return ret;
        }

        /* Check e_line for NULL before dereferencing it */
        if (likely(!e_line || !atomic_read(&e_line->left_eblks)))
                pblk_map_rq(pblk, rqd, c_ctx->sentry, lun_bitmap, valid, 0);
        else
                pblk_map_erase_rq(pblk, rqd, c_ctx->sentry, lun_bitmap,
                                                        valid, erase_ppa);

        return ret;
}
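
/* Set up a write request that resubmits entries recovered from a previous
 * write failure.
 */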
int pblk_setup_w_rec_rq(struct pblk *pblk, struct nvm_rq *rqd,
                        struct pblk_c_ctx *c_ctx)
{
        struct pblk_line_meta *lm = &pblk->lm;
        unsigned long *lun_bitmap;
        int ret;

        lun_bitmap = kzalloc(lm->lun_bitmap_len, GFP_KERNEL);
        if (!lun_bitmap)
                return -ENOMEM;
        c_ctx->lun_bitmap = lun_bitmap;

        ret = pblk_alloc_w_rq(pblk, rqd, rqd->nr_ppas, pblk_end_io_write);
        if (ret)
                return ret;

        pblk_map_rq(pblk, rqd, c_ctx->sentry, lun_bitmap, c_ctx->nr_valid, 0);

        rqd->ppa_status = (u64)0;
        rqd->flags = pblk_set_progr_mode(pblk, WRITE);

        return ret;
}
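
/* Decide how many sectors to write out of those available in the write
 * buffer, honoring an outstanding flush and the device's write constraints.
 */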
static int pblk_calc_secs_to_sync(struct pblk *pblk, unsigned int secs_avail,
                                  unsigned int secs_to_flush)
{
        int secs_to_sync;

        secs_to_sync = pblk_calc_secs(pblk, secs_avail, secs_to_flush);

#ifdef CONFIG_NVM_DEBUG
        if ((!secs_to_sync && secs_to_flush)
                        || (secs_to_sync < 0)
                        || (secs_to_sync > secs_avail && !secs_to_flush)) {
                pr_err("pblk: bad sector calculation (a:%d,s:%d,f:%d)\n",
                                secs_avail, secs_to_sync, secs_to_flush);
        }
#endif

        return secs_to_sync;
}
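
/* Returns 1 when it is a good time to issue the emeta I/O for meta_line,
 * based on which LUNs the in-flight data I/O (ppa_list) touches; see the
 * distance heuristic documented inside the function.
 */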
static inline int pblk_valid_meta_ppa(struct pblk *pblk,
                                      struct pblk_line *meta_line,
                                      struct ppa_addr *ppa_list, int nr_ppas)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        struct pblk_line *data_line;
        struct ppa_addr ppa, ppa_opt;
        u64 paddr;
        int i;

        data_line = &pblk->lines[pblk_dev_ppa_to_line(ppa_list[0])];
        paddr = pblk_lookup_page(pblk, meta_line);
        ppa = addr_to_gen_ppa(pblk, paddr, 0);

        if (test_bit(pblk_ppa_to_pos(geo, ppa), data_line->blk_bitmap))
                return 1;

        /* Schedule a metadata I/O that is half the distance from the data I/O
         * with regards to the number of LUNs forming the pblk instance. This
         * balances LUN conflicts across every I/O.
         *
         * When the LUN configuration changes (e.g., due to GC), this distance
         * can align, which would result in a LUN deadlock. In this case, modify
         * the distance to not be optimal, but allow metadata I/Os to succeed.
         */
        ppa_opt = addr_to_gen_ppa(pblk, paddr + data_line->meta_distance, 0);
        if (unlikely(ppa_opt.ppa == ppa.ppa)) {
                data_line->meta_distance--;
                return 0;
        }

        for (i = 0; i < nr_ppas; i += pblk->min_write_pgs)
                if (ppa_list[i].g.ch == ppa_opt.g.ch &&
                                        ppa_list[i].g.lun == ppa_opt.g.lun)
                        return 1;

        if (test_bit(pblk_ppa_to_pos(geo, ppa_opt), data_line->blk_bitmap)) {
                for (i = 0; i < nr_ppas; i += pblk->min_write_pgs)
                        if (ppa_list[i].g.ch == ppa.g.ch &&
                                                ppa_list[i].g.lun == ppa.g.lun)
                                return 1;
        }

        return 0;
}
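
/* Write out the next emeta chunk for meta_line, mapping it onto the line's
 * next free pages. On submission failure the page allocation is rolled back
 * and the line is put back on the emeta list.
 */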
int pblk_submit_meta_io(struct pblk *pblk, struct pblk_line *meta_line)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        struct pblk_line_meta *lm = &pblk->lm;
        struct pblk_emeta *emeta = meta_line->emeta;
        struct pblk_g_ctx *m_ctx;
        struct pblk_lun *rlun;
        struct bio *bio;
        struct nvm_rq *rqd;
        void *data;
        u64 paddr;
        int rq_ppas = pblk->min_write_pgs;
        int id = meta_line->id;
        int rq_len;
        int i, j;
        int ret;

        rqd = pblk_alloc_rqd(pblk, READ);
        if (IS_ERR(rqd)) {
                pr_err("pblk: cannot allocate write req.\n");
                return PTR_ERR(rqd);
        }
        m_ctx = nvm_rq_to_pdu(rqd);
        m_ctx->private = meta_line;

        rq_len = rq_ppas * geo->sec_size;
        data = ((void *)emeta->buf) + emeta->mem;

        bio = pblk_bio_map_addr(pblk, data, rq_ppas, rq_len, GFP_KERNEL);
        if (IS_ERR(bio)) {
                ret = PTR_ERR(bio);
                goto fail_free_rqd;
        }
        bio->bi_iter.bi_sector = 0; /* internal bio */
        bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
        rqd->bio = bio;

        ret = pblk_alloc_w_rq(pblk, rqd, rq_ppas, pblk_end_io_write_meta);
        if (ret)
                goto fail_free_bio;

        for (i = 0; i < rqd->nr_ppas; ) {
                spin_lock(&meta_line->lock);
                paddr = __pblk_alloc_page(pblk, meta_line, rq_ppas);
                spin_unlock(&meta_line->lock);
                for (j = 0; j < rq_ppas; j++, i++, paddr++)
                        rqd->ppa_list[i] = addr_to_gen_ppa(pblk, paddr, id);
        }

        rlun = &pblk->luns[pblk_ppa_to_pos(geo, rqd->ppa_list[0])];
        ret = down_timeout(&rlun->wr_sem, msecs_to_jiffies(5000));
        if (ret) {
                pr_err("pblk: lun semaphore timed out (%d)\n", ret);
                goto fail_free_bio;
        }

        emeta->mem += rq_len;
        if (emeta->mem >= lm->emeta_len[0]) {
                spin_lock(&l_mg->close_lock);
                list_del(&meta_line->list);
                WARN(!bitmap_full(meta_line->map_bitmap, lm->sec_per_line),
                                "pblk: corrupt meta line %d\n", meta_line->id);
                spin_unlock(&l_mg->close_lock);
        }

        ret = pblk_submit_io(pblk, rqd);
        if (ret) {
                pr_err("pblk: emeta I/O submission failed: %d\n", ret);
                goto fail_rollback;
        }

        return NVM_IO_OK;

fail_rollback:
        spin_lock(&l_mg->close_lock);
        pblk_dealloc_page(pblk, meta_line, rq_ppas);
        list_add(&meta_line->list, &l_mg->emeta_list); /* re-queue line */
        spin_unlock(&l_mg->close_lock);
fail_free_bio:
        bio_put(bio);
fail_free_rqd:
        pblk_free_rqd(pblk, rqd, READ);
        return ret;
}
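
/* Pick the oldest line with outstanding emeta and submit a metadata I/O for
 * it, provided doing so cannot conflict with the data I/O about to be issued.
 */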
static int pblk_sched_meta_io(struct pblk *pblk, struct ppa_addr *prev_list,
                              int prev_n)
{
        struct pblk_line_meta *lm = &pblk->lm;
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        struct pblk_line *meta_line;

        spin_lock(&l_mg->close_lock);
retry:
        if (list_empty(&l_mg->emeta_list)) {
                spin_unlock(&l_mg->close_lock);
                return 0;
        }
        meta_line = list_first_entry(&l_mg->emeta_list, struct pblk_line, list);
        if (bitmap_full(meta_line->map_bitmap, lm->sec_per_line))
                goto retry;
        spin_unlock(&l_mg->close_lock);

        if (!pblk_valid_meta_ppa(pblk, meta_line, prev_list, prev_n))
                return 0;

        return pblk_submit_meta_io(pblk, meta_line);
}
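
/* Submit a fully set-up write request: the data I/O for the current line,
 * plus either a metadata I/O for the previous line (when no erase is
 * pending) or an asynchronous erase on the next data line.
 */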
static int pblk_submit_io_set(struct pblk *pblk, struct nvm_rq *rqd)
{
        struct pblk_c_ctx *c_ctx = nvm_rq_to_pdu(rqd);
        struct ppa_addr erase_ppa;
        int err;

        ppa_set_empty(&erase_ppa);

        /* Assign lbas to ppas and populate request structure */
        err = pblk_setup_w_rq(pblk, rqd, c_ctx, &erase_ppa);
        if (err) {
                pr_err("pblk: could not setup write request: %d\n", err);
                return NVM_IO_ERR;
        }

        if (likely(ppa_empty(erase_ppa))) {
                /* Submit metadata write for previous data line */
                err = pblk_sched_meta_io(pblk, rqd->ppa_list, rqd->nr_ppas);
                if (err) {
                        pr_err("pblk: metadata I/O submission failed: %d\n",
                                                                        err);
                        return NVM_IO_ERR;
                }

                /* Submit data write for current data line */
                err = pblk_submit_io(pblk, rqd);
                if (err) {
                        pr_err("pblk: data I/O submission failed: %d\n", err);
                        return NVM_IO_ERR;
                }
        } else {
                /* Submit data write for current data line */
                err = pblk_submit_io(pblk, rqd);
                if (err) {
                        pr_err("pblk: data I/O submission failed: %d\n", err);
                        return NVM_IO_ERR;
                }

                /* Submit available erase for next data line */
                if (pblk_blk_erase_async(pblk, erase_ppa)) {
                        struct pblk_line *e_line = pblk_line_get_erase(pblk);
                        struct nvm_tgt_dev *dev = pblk->dev;
                        struct nvm_geo *geo = &dev->geo;
                        int bit;

                        atomic_inc(&e_line->left_eblks);
                        bit = pblk_ppa_to_pos(geo, erase_ppa);
                        WARN_ON(!test_and_clear_bit(bit, e_line->erase_bitmap));
                }
        }

        return NVM_IO_OK;
}
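
/* Free the pages added to pad a write bio to the minimum write size */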
static void pblk_free_write_rqd(struct pblk *pblk, struct nvm_rq *rqd)
{
        struct pblk_c_ctx *c_ctx = nvm_rq_to_pdu(rqd);
        struct bio *bio = rqd->bio;

        if (c_ctx->nr_padded)
                pblk_bio_free_pages(pblk, bio, rqd->nr_ppas, c_ctx->nr_padded);
}
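
/* Drain the write buffer: commit a chunk of cached sectors and submit them
 * to the media. Returns 0 if data was submitted, 1 if there was nothing to
 * do or an error occurred.
 */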
static int pblk_submit_write(struct pblk *pblk)
{
        struct bio *bio;
        struct nvm_rq *rqd;
        unsigned int secs_avail, secs_to_sync, secs_to_com;
        unsigned int secs_to_flush;
        unsigned long pos;

        /* If there are no sectors in the cache, flushes (bios without data)
         * will be cleared on the cache threads
         */
        secs_avail = pblk_rb_read_count(&pblk->rwb);
        if (!secs_avail)
                return 1;

        secs_to_flush = pblk_rb_sync_point_count(&pblk->rwb);
        if (!secs_to_flush && secs_avail < pblk->min_write_pgs)
                return 1;

        rqd = pblk_alloc_rqd(pblk, WRITE);
        if (IS_ERR(rqd)) {
                pr_err("pblk: cannot allocate write req.\n");
                return 1;
        }

        bio = bio_alloc(GFP_KERNEL, pblk->max_write_pgs);
        if (!bio) {
                pr_err("pblk: cannot allocate write bio\n");
                goto fail_free_rqd;
        }
        bio->bi_iter.bi_sector = 0; /* internal bio */
        bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
        rqd->bio = bio;

        secs_to_sync = pblk_calc_secs_to_sync(pblk, secs_avail, secs_to_flush);
        if (secs_to_sync > pblk->max_write_pgs) {
                pr_err("pblk: bad buffer sync calculation\n");
                goto fail_put_bio;
        }

        secs_to_com = (secs_to_sync > secs_avail) ? secs_avail : secs_to_sync;
        pos = pblk_rb_read_commit(&pblk->rwb, secs_to_com);

        if (pblk_rb_read_to_bio(&pblk->rwb, rqd, bio, pos, secs_to_sync,
                                                                secs_avail)) {
                pr_err("pblk: corrupted write bio\n");
                goto fail_put_bio;
        }

        if (pblk_submit_io_set(pblk, rqd))
                goto fail_free_bio;

#ifdef CONFIG_NVM_DEBUG
        atomic_long_add(secs_to_sync, &pblk->sub_writes);
#endif

        return 0;

fail_free_bio:
        pblk_free_write_rqd(pblk, rqd);
fail_put_bio:
        bio_put(bio);
fail_free_rqd:
        pblk_free_rqd(pblk, rqd, WRITE);
        return 1;
}
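
/* Write thread: repeatedly drains the write buffer and sleeps when there is
 * nothing to submit.
 */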
int pblk_write_ts(void *data)
{
        struct pblk *pblk = data;

        while (!kthread_should_stop()) {
                if (!pblk_submit_write(pblk))
                        continue;
                set_current_state(TASK_INTERRUPTIBLE);
                io_schedule();
        }

        return 0;
}