/*
 * Copyright (C) 2016 CNEX Labs
 * Initial release: Javier Gonzalez <javier@cnexlabs.com>
 *                  Matias Bjorling <matias@cnexlabs.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * pblk-read.c - pblk's read path
 */

#include "pblk.h"

/*
 * There is no guarantee that the value read from cache has not been updated
 * and resides at another location in the cache. We guarantee though that if
 * the value is read from the cache, it belongs to the mapped lba. In order to
 * guarantee that writes and reads are ordered, a flush must be issued.
 */
static int pblk_read_from_cache(struct pblk *pblk, struct bio *bio,
                                sector_t lba, struct ppa_addr ppa,
                                int bio_iter)
{
#ifdef CONFIG_NVM_DEBUG
        /* Callers must ensure that the ppa points to a cache address */
        BUG_ON(pblk_ppa_empty(ppa));
        BUG_ON(!pblk_addr_in_cache(ppa));
#endif

        return pblk_rb_copy_to_bio(&pblk->rwb, bio, lba,
                                        pblk_addr_to_cacheline(ppa), bio_iter);
}

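/*
 * Resolve each lba in the request: sectors served from the write buffer
 * (or mapped to an empty ppa) are marked in read_bitmap; the rest are
 * collected in rqd->ppa_list to be read from the device.
 */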
static void pblk_read_ppalist_rq(struct pblk *pblk, struct nvm_rq *rqd,
                                 unsigned long *read_bitmap)
{
        struct bio *bio = rqd->bio;
        struct ppa_addr ppas[PBLK_MAX_REQ_ADDRS];
        sector_t blba = pblk_get_lba(bio);
        int nr_secs = rqd->nr_ppas;
        int advanced_bio = 0;
        int i, j = 0;

        /* logic error: lba out-of-bounds. Ignore read request */
        if (blba + nr_secs >= pblk->rl.nr_secs) {
                WARN(1, "pblk: read lbas out of bounds\n");
                return;
        }

        pblk_lookup_l2p_seq(pblk, ppas, blba, nr_secs);

        for (i = 0; i < nr_secs; i++) {
                struct ppa_addr p = ppas[i];
                sector_t lba = blba + i;

retry:
                if (pblk_ppa_empty(p)) {
                        WARN_ON(test_and_set_bit(i, read_bitmap));
                        continue;
                }

                /* Try to read from write buffer. The address is later checked
                 * on the write buffer to prevent retrieving overwritten data.
                 */
                if (pblk_addr_in_cache(p)) {
                        if (!pblk_read_from_cache(pblk, bio, lba, p, i)) {
                                pblk_lookup_l2p_seq(pblk, &p, lba, 1);
                                goto retry;
                        }
                        WARN_ON(test_and_set_bit(i, read_bitmap));
                        advanced_bio = 1;
#ifdef CONFIG_NVM_DEBUG
                        atomic_long_inc(&pblk->cache_reads);
#endif
                } else {
                        /* Read from media non-cached sectors */
                        rqd->ppa_list[j++] = p;
                }

                if (advanced_bio)
                        bio_advance(bio, PBLK_EXPOSED_PAGE_SIZE);
        }

#ifdef CONFIG_NVM_DEBUG
        atomic_long_add(nr_secs, &pblk->inflight_reads);
#endif
}

static int pblk_submit_read_io(struct pblk *pblk, struct nvm_rq *rqd)
{
        int err;

        rqd->flags = pblk_set_read_mode(pblk);

        err = pblk_submit_io(pblk, rqd);
        if (err)
                return NVM_IO_ERR;

        return NVM_IO_OK;
}

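/*
 * Completion path for device reads: log any error, release the ppa list
 * and the request's bio, complete the original user bio if the read was
 * cloned, and return the rqd to its pool.
 */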
static void pblk_end_io_read(struct nvm_rq *rqd)
{
        struct pblk *pblk = rqd->private;
        struct nvm_tgt_dev *dev = pblk->dev;
        struct pblk_g_ctx *r_ctx = nvm_rq_to_pdu(rqd);
        struct bio *bio = rqd->bio;

        if (rqd->error)
                pblk_log_read_err(pblk, rqd);
#ifdef CONFIG_NVM_DEBUG
        else
                WARN_ONCE(bio->bi_status, "pblk: corrupted read error\n");
#endif

        if (rqd->nr_ppas > 1)
                nvm_dev_dma_free(dev->parent, rqd->ppa_list, rqd->dma_ppa_list);

        bio_put(bio);
        if (r_ctx->private) {
                struct bio *orig_bio = r_ctx->private;

#ifdef CONFIG_NVM_DEBUG
                WARN_ONCE(orig_bio->bi_status, "pblk: corrupted read bio\n");
#endif
                bio_endio(orig_bio);
                bio_put(orig_bio);
        }

#ifdef CONFIG_NVM_DEBUG
        atomic_long_add(rqd->nr_ppas, &pblk->sync_reads);
        atomic_long_sub(rqd->nr_ppas, &pblk->inflight_reads);
#endif

        pblk_free_rqd(pblk, rqd, READ);
}

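/*
 * The request was only partially served from the write buffer. Read the
 * remaining holes from the device synchronously into a scratch bio, then
 * copy each hole's page into the matching page of the original bio before
 * completing it.
 */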
static int pblk_fill_partial_read_bio(struct pblk *pblk, struct nvm_rq *rqd,
                                      unsigned int bio_init_idx,
                                      unsigned long *read_bitmap)
{
        struct bio *new_bio, *bio = rqd->bio;
        struct bio_vec src_bv, dst_bv;
        void *ppa_ptr = NULL;
        void *src_p, *dst_p;
        dma_addr_t dma_ppa_list = 0;
        int nr_secs = rqd->nr_ppas;
        int nr_holes = nr_secs - bitmap_weight(read_bitmap, nr_secs);
        int i, ret, hole;
        DECLARE_COMPLETION_ONSTACK(wait);

        new_bio = bio_alloc(GFP_KERNEL, nr_holes);
        if (!new_bio) {
                pr_err("pblk: could not alloc read bio\n");
                return NVM_IO_ERR;
        }

        if (pblk_bio_add_pages(pblk, new_bio, GFP_KERNEL, nr_holes))
                goto err;

        if (nr_holes != new_bio->bi_vcnt) {
                pr_err("pblk: malformed bio\n");
                goto err;
        }

        new_bio->bi_iter.bi_sector = 0; /* internal bio */
        bio_set_op_attrs(new_bio, REQ_OP_READ, 0);
        new_bio->bi_private = &wait;
        new_bio->bi_end_io = pblk_end_bio_sync;

        rqd->bio = new_bio;
        rqd->nr_ppas = nr_holes;
        rqd->end_io = NULL;

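        /*
         * If only one hole remains in a multi-sector request, the ppa goes
         * in rqd->ppa_addr rather than in a one-entry ppa_list. Stash the
         * list so it can be restored (and later DMA-freed) after the I/O.
         */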
        if (unlikely(nr_secs > 1 && nr_holes == 1)) {
                ppa_ptr = rqd->ppa_list;
                dma_ppa_list = rqd->dma_ppa_list;
                rqd->ppa_addr = rqd->ppa_list[0];
        }

        ret = pblk_submit_read_io(pblk, rqd);
        if (ret) {
                bio_put(rqd->bio);
                pr_err("pblk: read IO submission failed\n");
                goto err;
        }

        if (!wait_for_completion_io_timeout(&wait,
                                msecs_to_jiffies(PBLK_COMMAND_TIMEOUT_MS))) {
                pr_err("pblk: partial read I/O timed out\n");
        }

        if (rqd->error) {
                atomic_long_inc(&pblk->read_failed);
#ifdef CONFIG_NVM_DEBUG
                pblk_print_failed_rqd(pblk, rqd, rqd->error);
#endif
        }

        if (unlikely(nr_secs > 1 && nr_holes == 1)) {
                rqd->ppa_list = ppa_ptr;
                rqd->dma_ppa_list = dma_ppa_list;
        }

        /* Fill the holes in the original bio */
        i = 0;
        hole = find_first_zero_bit(read_bitmap, nr_secs);
        do {
                src_bv = new_bio->bi_io_vec[i++];
                dst_bv = bio->bi_io_vec[bio_init_idx + hole];

                src_p = kmap_atomic(src_bv.bv_page);
                dst_p = kmap_atomic(dst_bv.bv_page);

                memcpy(dst_p + dst_bv.bv_offset,
                        src_p + src_bv.bv_offset,
                        PBLK_EXPOSED_PAGE_SIZE);

                kunmap_atomic(src_p);
                kunmap_atomic(dst_p);

                mempool_free(src_bv.bv_page, pblk->page_pool);

                hole = find_next_zero_bit(read_bitmap, nr_secs, hole + 1);
        } while (hole < nr_secs);

        bio_put(new_bio);

        /* Complete the original bio and associated request */
        rqd->bio = bio;
        rqd->nr_ppas = nr_secs;
        rqd->private = pblk;

        bio_endio(bio);
        pblk_end_io_read(rqd);
        return NVM_IO_OK;

err:
        /* Free the pages allocated for new_bio, not the original bio */
        pblk_bio_free_pages(pblk, new_bio, 0, new_bio->bi_vcnt);
        rqd->private = pblk;
        pblk_end_io_read(rqd);
        return NVM_IO_ERR;
}

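/* Single-sector counterpart of pblk_read_ppalist_rq */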
static void pblk_read_rq(struct pblk *pblk, struct nvm_rq *rqd,
                         unsigned long *read_bitmap)
{
        struct bio *bio = rqd->bio;
        struct ppa_addr ppa;
        sector_t lba = pblk_get_lba(bio);

        /* logic error: lba out-of-bounds. Ignore read request */
        if (lba >= pblk->rl.nr_secs) {
                WARN(1, "pblk: read lba out of bounds\n");
                return;
        }

        pblk_lookup_l2p_seq(pblk, &ppa, lba, 1);

#ifdef CONFIG_NVM_DEBUG
        atomic_long_inc(&pblk->inflight_reads);
#endif

retry:
        if (pblk_ppa_empty(ppa)) {
                WARN_ON(test_and_set_bit(0, read_bitmap));
                return;
        }

        /* Try to read from write buffer. The address is later checked on the
         * write buffer to prevent retrieving overwritten data.
         */
        if (pblk_addr_in_cache(ppa)) {
                if (!pblk_read_from_cache(pblk, bio, lba, ppa, 0)) {
                        pblk_lookup_l2p_seq(pblk, &ppa, lba, 1);
                        goto retry;
                }
                WARN_ON(test_and_set_bit(0, read_bitmap));
#ifdef CONFIG_NVM_DEBUG
                atomic_long_inc(&pblk->cache_reads);
#endif
        } else {
                rqd->ppa_addr = ppa;
        }
}

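/*
 * Read entry point. Depending on how much of the request the write buffer
 * can serve, the bio is completed immediately (fully cached), submitted to
 * the device as a whole (nothing cached), or handled as a partial read
 * that fills the remaining holes from the device.
 */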
int pblk_submit_read(struct pblk *pblk, struct bio *bio)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        unsigned int nr_secs = pblk_get_secs(bio);
        struct nvm_rq *rqd;
        unsigned long read_bitmap; /* Max 64 ppas per request */
        unsigned int bio_init_idx;
        int ret = NVM_IO_ERR;

        if (nr_secs > PBLK_MAX_REQ_ADDRS)
                return NVM_IO_ERR;

        bitmap_zero(&read_bitmap, nr_secs);

        rqd = pblk_alloc_rqd(pblk, READ);
        if (IS_ERR(rqd)) {
                pr_err_ratelimited("pblk: not able to alloc rqd\n");
                return NVM_IO_ERR;
        }

        rqd->opcode = NVM_OP_PREAD;
        rqd->bio = bio;
        rqd->nr_ppas = nr_secs;
        rqd->private = pblk;
        rqd->end_io = pblk_end_io_read;

        /* Save the index for this bio's start. This is needed in case
         * we need to fill a partial read.
         */
        bio_init_idx = pblk_get_bi_idx(bio);

        if (nr_secs > 1) {
                rqd->ppa_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
                                                &rqd->dma_ppa_list);
                if (!rqd->ppa_list) {
                        pr_err("pblk: not able to allocate ppa list\n");
                        goto fail_rqd_free;
                }

                pblk_read_ppalist_rq(pblk, rqd, &read_bitmap);
        } else {
                pblk_read_rq(pblk, rqd, &read_bitmap);
        }

        bio_get(bio);
        if (bitmap_full(&read_bitmap, nr_secs)) {
                bio_endio(bio);
                pblk_end_io_read(rqd);
                return NVM_IO_OK;
        }

        /* All sectors are to be read from the device */
        if (bitmap_empty(&read_bitmap, rqd->nr_ppas)) {
                struct bio *int_bio = NULL;
                struct pblk_g_ctx *r_ctx = nvm_rq_to_pdu(rqd);

                /* Clone read bio to deal with read errors internally */
                int_bio = bio_clone_fast(bio, GFP_KERNEL, pblk_bio_set);
                if (!int_bio) {
                        pr_err("pblk: could not clone read bio\n");
                        return NVM_IO_ERR;
                }

                rqd->bio = int_bio;
                r_ctx->private = bio;

                ret = pblk_submit_read_io(pblk, rqd);
                if (ret) {
                        pr_err("pblk: read IO submission failed\n");
                        bio_put(int_bio);
                        return ret;
                }

                return NVM_IO_OK;
        }

        /* The read bio request could be partially filled by the write buffer,
         * but there are some holes that need to be read from the drive.
         */
        ret = pblk_fill_partial_read_bio(pblk, rqd, bio_init_idx, &read_bitmap);
        if (ret) {
                pr_err("pblk: failed to perform partial read\n");
                return ret;
        }

        return NVM_IO_OK;

fail_rqd_free:
        pblk_free_rqd(pblk, rqd, READ);
        return ret;
}

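/*
 * Collect the ppas that still live on the line being garbage collected.
 * Lbas whose mapping has moved (cache-resident, remapped to another line,
 * or emptied) are marked ADDR_EMPTY so GC skips them.
 */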
static int read_ppalist_rq_gc(struct pblk *pblk, struct nvm_rq *rqd,
                              struct pblk_line *line, u64 *lba_list,
                              unsigned int nr_secs)
{
        struct ppa_addr ppas[PBLK_MAX_REQ_ADDRS];
        int valid_secs = 0;
        int i;

        pblk_lookup_l2p_rand(pblk, ppas, lba_list, nr_secs);

        for (i = 0; i < nr_secs; i++) {
                if (pblk_addr_in_cache(ppas[i]) || ppas[i].g.blk != line->id ||
                                                pblk_ppa_empty(ppas[i])) {
                        lba_list[i] = ADDR_EMPTY;
                        continue;
                }

                rqd->ppa_list[valid_secs++] = ppas[i];
        }

#ifdef CONFIG_NVM_DEBUG
        atomic_long_add(valid_secs, &pblk->inflight_reads);
#endif
        return valid_secs;
}

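/*
 * Single-sector counterpart of read_ppalist_rq_gc: take the mapping under
 * trans_lock and keep it only if it still lives on the victim line.
 */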
static int read_rq_gc(struct pblk *pblk, struct nvm_rq *rqd,
                      struct pblk_line *line, sector_t lba)
{
        struct ppa_addr ppa;
        int valid_secs = 0;

        if (lba == ADDR_EMPTY)
                goto out;

        /* logic error: lba out-of-bounds */
        if (lba >= pblk->rl.nr_secs) {
                WARN(1, "pblk: read lba out of bounds\n");
                goto out;
        }

        spin_lock(&pblk->trans_lock);
        ppa = pblk_trans_map_get(pblk, lba);
        spin_unlock(&pblk->trans_lock);

        /* Ignore lbas whose mapping has been updated in the meantime */
        if (pblk_addr_in_cache(ppa) || ppa.g.blk != line->id ||
                                                        pblk_ppa_empty(ppa))
                goto out;

        rqd->ppa_addr = ppa;
        valid_secs = 1;

#ifdef CONFIG_NVM_DEBUG
        atomic_long_inc(&pblk->inflight_reads);
#endif

out:
        return valid_secs;
}

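/*
 * Synchronous read of still-valid sectors on behalf of the garbage
 * collector. The number of sectors actually read is returned through
 * *secs_to_gc.
 */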
int pblk_submit_read_gc(struct pblk *pblk, u64 *lba_list, void *data,
                        unsigned int nr_secs, unsigned int *secs_to_gc,
                        struct pblk_line *line)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        struct request_queue *q = dev->q;
        struct bio *bio;
        struct nvm_rq rqd;
        int ret, data_len;
        DECLARE_COMPLETION_ONSTACK(wait);

        memset(&rqd, 0, sizeof(struct nvm_rq));

        if (nr_secs > 1) {
                rqd.ppa_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
                                                        &rqd.dma_ppa_list);
                if (!rqd.ppa_list)
                        return NVM_IO_ERR;

                *secs_to_gc = read_ppalist_rq_gc(pblk, &rqd, line, lba_list,
                                                                nr_secs);
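                /*
                 * A single valid sector is submitted through rqd.ppa_addr,
                 * which overlaps dma_ppa_list in struct nvm_rq, so the
                 * one-entry ppa_list must be freed (and cleared) first.
                 */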
                if (*secs_to_gc == 1) {
                        struct ppa_addr ppa;

                        ppa = rqd.ppa_list[0];
                        nvm_dev_dma_free(dev->parent, rqd.ppa_list,
                                                        rqd.dma_ppa_list);
                        rqd.ppa_list = NULL;
                        rqd.ppa_addr = ppa;
                }
        } else {
                *secs_to_gc = read_rq_gc(pblk, &rqd, line, lba_list[0]);
        }

        if (!(*secs_to_gc))
                goto out;

        data_len = (*secs_to_gc) * geo->sec_size;
        bio = bio_map_kern(q, data, data_len, GFP_KERNEL);
        if (IS_ERR(bio)) {
                pr_err("pblk: could not allocate GC bio (%ld)\n", PTR_ERR(bio));
                goto err_free_dma;
        }

        bio->bi_iter.bi_sector = 0; /* internal bio */
        bio_set_op_attrs(bio, REQ_OP_READ, 0);

        rqd.opcode = NVM_OP_PREAD;
        rqd.end_io = pblk_end_io_sync;
        rqd.private = &wait;
        rqd.nr_ppas = *secs_to_gc;
        rqd.bio = bio;

        ret = pblk_submit_read_io(pblk, &rqd);
        if (ret) {
                bio_endio(bio);
                pr_err("pblk: GC read request failed\n");
                goto err_free_dma;
        }

        if (!wait_for_completion_io_timeout(&wait,
                                msecs_to_jiffies(PBLK_COMMAND_TIMEOUT_MS))) {
                pr_err("pblk: GC read I/O timed out\n");
        }

        if (rqd.error) {
                atomic_long_inc(&pblk->read_failed_gc);
#ifdef CONFIG_NVM_DEBUG
                pblk_print_failed_rqd(pblk, &rqd, rqd.error);
#endif
        }

#ifdef CONFIG_NVM_DEBUG
        atomic_long_add(*secs_to_gc, &pblk->sync_reads);
        atomic_long_add(*secs_to_gc, &pblk->recov_gc_reads);
        atomic_long_sub(*secs_to_gc, &pblk->inflight_reads);
#endif

out:
        /* The ppa list is non-NULL only while this request still owns it */
        if (rqd.ppa_list)
                nvm_dev_dma_free(dev->parent, rqd.ppa_list, rqd.dma_ppa_list);
        return NVM_IO_OK;

err_free_dma:
        if (rqd.ppa_list)
                nvm_dev_dma_free(dev->parent, rqd.ppa_list, rqd.dma_ppa_list);
        return NVM_IO_ERR;
}