lightnvm: pblk: sched. metadata on write thread
/*
 * Copyright (C) 2016 CNEX Labs
 * Initial release: Javier Gonzalez <javier@cnexlabs.com>
 *                  Matias Bjorling <matias@cnexlabs.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * pblk-write.c - pblk's write path from write buffer to media
 */

#include "pblk.h"

static void pblk_sync_line(struct pblk *pblk, struct pblk_line *line)
{
#ifdef CONFIG_NVM_DEBUG
	atomic_long_inc(&pblk->sync_writes);
#endif

	/* Counter protected by rb sync lock */
	line->left_ssecs--;
}

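/* Complete the original bios behind every valid entry of a finished write
 * request, advance the write buffer sync pointer and release the request.
 * Returns the new sync position in the write buffer.
 */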
static unsigned long pblk_end_w_bio(struct pblk *pblk, struct nvm_rq *rqd,
				    struct pblk_c_ctx *c_ctx)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct bio *original_bio;
	unsigned long ret;
	int i;

	for (i = 0; i < c_ctx->nr_valid; i++) {
		struct pblk_w_ctx *w_ctx;
		struct ppa_addr p;
		struct pblk_line *line;

		w_ctx = pblk_rb_w_ctx(&pblk->rwb, c_ctx->sentry + i);

		p = rqd->ppa_list[i];
		line = &pblk->lines[pblk_dev_ppa_to_line(p)];
		pblk_sync_line(pblk, line);

		while ((original_bio = bio_list_pop(&w_ctx->bios)))
			bio_endio(original_bio);
	}

#ifdef CONFIG_NVM_DEBUG
	atomic_long_add(c_ctx->nr_valid, &pblk->compl_writes);
#endif

	ret = pblk_rb_sync_advance(&pblk->rwb, c_ctx->nr_valid);

	if (rqd->meta_list)
		nvm_dev_dma_free(dev->parent, rqd->meta_list,
							rqd->dma_meta_list);

	bio_put(rqd->bio);
	pblk_free_rqd(pblk, rqd, WRITE);

	return ret;
}

static unsigned long pblk_end_queued_w_bio(struct pblk *pblk,
					   struct nvm_rq *rqd,
					   struct pblk_c_ctx *c_ctx)
{
	list_del(&c_ctx->list);
	return pblk_end_w_bio(pblk, rqd, c_ctx);
}

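/* Write completions must hit the write buffer in order. If this request is
 * not at the current sync position, park it on the completion list; whichever
 * completion reaches the position then drains the queued ones.
 */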
static void pblk_complete_write(struct pblk *pblk, struct nvm_rq *rqd,
				struct pblk_c_ctx *c_ctx)
{
	struct pblk_c_ctx *c, *r;
	unsigned long flags;
	unsigned long pos;

#ifdef CONFIG_NVM_DEBUG
	atomic_long_sub(c_ctx->nr_valid, &pblk->inflight_writes);
#endif

	pblk_up_rq(pblk, rqd->ppa_list, rqd->nr_ppas, c_ctx->lun_bitmap);

	pos = pblk_rb_sync_init(&pblk->rwb, &flags);
	if (pos == c_ctx->sentry) {
		pos = pblk_end_w_bio(pblk, rqd, c_ctx);

retry:
		list_for_each_entry_safe(c, r, &pblk->compl_list, list) {
			rqd = nvm_rq_from_c_ctx(c);
			if (c->sentry == pos) {
				pos = pblk_end_queued_w_bio(pblk, rqd, c);
				goto retry;
			}
		}
	} else {
		WARN_ON(nvm_rq_from_c_ctx(c_ctx) != rqd);
		list_add_tail(&c_ctx->list, &pblk->compl_list);
	}
	pblk_rb_sync_end(&pblk->rwb, &flags);
}

/* When a write fails, we are not sure whether the block has grown bad or a page
 * range is more susceptible to write errors. If a high number of pages fail, we
 * assume that the block is bad and we mark it accordingly. In all cases, we
 * remap and resubmit the failed entries as fast as possible; if a flush is
 * waiting on a completion, the whole stack would stall otherwise.
 */
static void pblk_end_w_fail(struct pblk *pblk, struct nvm_rq *rqd)
{
	void *comp_bits = &rqd->ppa_status;
	struct pblk_c_ctx *c_ctx = nvm_rq_to_pdu(rqd);
	struct pblk_rec_ctx *recovery;
	struct ppa_addr *ppa_list = rqd->ppa_list;
	int nr_ppas = rqd->nr_ppas;
	unsigned int c_entries;
	int bit, ret;

	if (unlikely(nr_ppas == 1))
		ppa_list = &rqd->ppa_addr;

	recovery = mempool_alloc(pblk->rec_pool, GFP_ATOMIC);
	if (!recovery) {
		pr_err("pblk: could not allocate recovery context\n");
		return;
	}
	INIT_LIST_HEAD(&recovery->failed);

	bit = -1;
	while ((bit = find_next_bit(comp_bits, nr_ppas, bit + 1)) < nr_ppas) {
		struct pblk_rb_entry *entry;
		struct ppa_addr ppa;

		/* Logic error */
		if (bit > c_ctx->nr_valid) {
			WARN_ONCE(1, "pblk: corrupted write request\n");
			mempool_free(recovery, pblk->rec_pool);
			goto out;
		}

		ppa = ppa_list[bit];
		entry = pblk_rb_sync_scan_entry(&pblk->rwb, &ppa);
		if (!entry) {
			pr_err("pblk: could not scan entry on write failure\n");
			mempool_free(recovery, pblk->rec_pool);
			goto out;
		}

		/* The list is filled first and emptied afterwards. No need for
		 * protecting it with a lock
		 */
		list_add_tail(&entry->index, &recovery->failed);
	}

	c_entries = find_first_bit(comp_bits, nr_ppas);
	ret = pblk_recov_setup_rq(pblk, c_ctx, recovery, comp_bits, c_entries);
	if (ret) {
		pr_err("pblk: could not recover from write failure\n");
		mempool_free(recovery, pblk->rec_pool);
		goto out;
	}

	INIT_WORK(&recovery->ws_rec, pblk_submit_rec);
	queue_work(pblk->kw_wq, &recovery->ws_rec);

out:
	pblk_complete_write(pblk, rqd, c_ctx);
}

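/* Completion path for data writes. On error, hand the failed sectors over to
 * the recovery path; pblk_end_w_fail() completes the request once recovery
 * has been queued.
 */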
static void pblk_end_io_write(struct nvm_rq *rqd)
{
	struct pblk *pblk = rqd->private;
	struct pblk_c_ctx *c_ctx = nvm_rq_to_pdu(rqd);

	if (rqd->error) {
		pblk_log_write_err(pblk, rqd);
		return pblk_end_w_fail(pblk, rqd);
	}
#ifdef CONFIG_NVM_DEBUG
	else
		WARN_ONCE(rqd->bio->bi_status, "pblk: corrupted write error\n");
#endif

	pblk_complete_write(pblk, rqd, c_ctx);
}

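/* Completion path for emeta writes. Release the LUN write semaphore taken at
 * submission time and, once every emeta sector for the line has synced,
 * schedule the work that closes the line. The request goes back to the READ
 * pool it was drawn from in pblk_submit_meta_io().
 */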
static void pblk_end_io_write_meta(struct nvm_rq *rqd)
{
	struct pblk *pblk = rqd->private;
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_g_ctx *m_ctx = nvm_rq_to_pdu(rqd);
	struct pblk_line *line = m_ctx->private;
	struct pblk_emeta *emeta = line->emeta;
	int pos = pblk_ppa_to_pos(geo, rqd->ppa_list[0]);
	struct pblk_lun *rlun = &pblk->luns[pos];
	int sync;

	up(&rlun->wr_sem);

	if (rqd->error) {
		pblk_log_write_err(pblk, rqd);
		pr_err("pblk: metadata I/O failed\n");
	}
#ifdef CONFIG_NVM_DEBUG
	else
		WARN_ONCE(rqd->bio->bi_status, "pblk: corrupted write error\n");
#endif

	sync = atomic_add_return(rqd->nr_ppas, &emeta->sync);
	if (sync == emeta->nr_entries)
		pblk_line_run_ws(pblk, line, NULL, pblk_line_close_ws);

	bio_put(rqd->bio);
	pblk_free_rqd(pblk, rqd, READ);
}

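/* Fill in the common fields of a write request and allocate the DMA region
 * backing its per-sector metadata. Multi-sector requests carve their PPA list
 * out of the same allocation; a single-sector request uses the inline
 * ppa_addr instead.
 */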
static int pblk_alloc_w_rq(struct pblk *pblk, struct nvm_rq *rqd,
			   unsigned int nr_secs,
			   nvm_end_io_fn(*end_io))
{
	struct nvm_tgt_dev *dev = pblk->dev;

	/* Setup write request */
	rqd->opcode = NVM_OP_PWRITE;
	rqd->nr_ppas = nr_secs;
	rqd->flags = pblk_set_progr_mode(pblk, WRITE);
	rqd->private = pblk;
	rqd->end_io = end_io;

	rqd->meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
							&rqd->dma_meta_list);
	if (!rqd->meta_list)
		return -ENOMEM;

	if (unlikely(nr_secs == 1))
		return 0;

	rqd->ppa_list = rqd->meta_list + pblk_dma_meta_size;
	rqd->dma_ppa_list = rqd->dma_meta_list + pblk_dma_meta_size;

	return 0;
}

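/* Map a data write onto the current line. If the next data line still has
 * blocks left to erase, map through pblk_map_erase_rq(), which additionally
 * returns a block to erase in erase_ppa.
 */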
static int pblk_setup_w_rq(struct pblk *pblk, struct nvm_rq *rqd,
			   struct pblk_c_ctx *c_ctx, struct ppa_addr *erase_ppa)
{
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_line *e_line = pblk_line_get_erase(pblk);
	unsigned int valid = c_ctx->nr_valid;
	unsigned int padded = c_ctx->nr_padded;
	unsigned int nr_secs = valid + padded;
	unsigned long *lun_bitmap;
	int ret = 0;

	lun_bitmap = kzalloc(lm->lun_bitmap_len, GFP_KERNEL);
	if (!lun_bitmap)
		return -ENOMEM;
	c_ctx->lun_bitmap = lun_bitmap;

	ret = pblk_alloc_w_rq(pblk, rqd, nr_secs, pblk_end_io_write);
	if (ret) {
		kfree(lun_bitmap);
		return ret;
	}

	if (likely(!e_line || !atomic_read(&e_line->left_eblks)))
		pblk_map_rq(pblk, rqd, c_ctx->sentry, lun_bitmap, valid, 0);
	else
		pblk_map_erase_rq(pblk, rqd, c_ctx->sentry, lun_bitmap,
							valid, erase_ppa);

	return 0;
}

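/* Counterpart of pblk_setup_w_rq() for the recovery path: map the resubmitted
 * entries directly, with no erase scheduling.
 */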
int pblk_setup_w_rec_rq(struct pblk *pblk, struct nvm_rq *rqd,
			struct pblk_c_ctx *c_ctx)
{
	struct pblk_line_meta *lm = &pblk->lm;
	unsigned long *lun_bitmap;
	int ret;

	lun_bitmap = kzalloc(lm->lun_bitmap_len, GFP_KERNEL);
	if (!lun_bitmap)
		return -ENOMEM;

	c_ctx->lun_bitmap = lun_bitmap;

	ret = pblk_alloc_w_rq(pblk, rqd, rqd->nr_ppas, pblk_end_io_write);
	if (ret) {
		kfree(lun_bitmap);
		return ret;
	}

	pblk_map_rq(pblk, rqd, c_ctx->sentry, lun_bitmap, c_ctx->nr_valid, 0);

	rqd->ppa_status = (u64)0;
	rqd->flags = pblk_set_progr_mode(pblk, WRITE);

	return ret;
}

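/* Calculate how many sectors to write out, given the sectors available in the
 * write buffer and any flush point that must be honored, sanity-checking the
 * result in debug builds.
 */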
static int pblk_calc_secs_to_sync(struct pblk *pblk, unsigned int secs_avail,
				  unsigned int secs_to_flush)
{
	int secs_to_sync;

	secs_to_sync = pblk_calc_secs(pblk, secs_avail, secs_to_flush);

#ifdef CONFIG_NVM_DEBUG
	if ((!secs_to_sync && secs_to_flush)
			|| (secs_to_sync < 0)
			|| (secs_to_sync > secs_avail && !secs_to_flush)) {
		pr_err("pblk: bad sector calculation (a:%d,s:%d,f:%d)\n",
				secs_avail, secs_to_sync, secs_to_flush);
	}
#endif

	return secs_to_sync;
}

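/* Decide whether now is a good time to issue the next emeta write on
 * meta_line. The ideal moment is when the data I/O described by ppa_list is
 * working on the LUN sitting meta_distance positions ahead of the metadata
 * write, so the two I/Os never contend for the same LUN. Returns 1 to issue
 * the metadata write now, 0 to wait.
 */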
static inline int pblk_valid_meta_ppa(struct pblk *pblk,
				      struct pblk_line *meta_line,
				      struct ppa_addr *ppa_list, int nr_ppas)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_line *data_line;
	struct ppa_addr ppa, ppa_opt;
	u64 paddr;
	int i;

	data_line = &pblk->lines[pblk_dev_ppa_to_line(ppa_list[0])];
	paddr = pblk_lookup_page(pblk, meta_line);
	ppa = addr_to_gen_ppa(pblk, paddr, 0);

	if (test_bit(pblk_ppa_to_pos(geo, ppa), data_line->blk_bitmap))
		return 1;

	/* Schedule a metadata I/O that is half the distance from the data I/O
	 * with regards to the number of LUNs forming the pblk instance. This
	 * balances LUN conflicts across every I/O.
	 *
	 * When the LUN configuration changes (e.g., due to GC), this distance
	 * can align, which would result in a LUN deadlock. In this case, modify
	 * the distance to not be optimal, but allow metadata I/Os to succeed.
	 */
	ppa_opt = addr_to_gen_ppa(pblk, paddr + data_line->meta_distance, 0);
	if (unlikely(ppa_opt.ppa == ppa.ppa)) {
		data_line->meta_distance--;
		return 0;
	}

	for (i = 0; i < nr_ppas; i += pblk->min_write_pgs)
		if (ppa_list[i].g.ch == ppa_opt.g.ch &&
					ppa_list[i].g.lun == ppa_opt.g.lun)
			return 1;

	if (test_bit(pblk_ppa_to_pos(geo, ppa_opt), data_line->blk_bitmap)) {
		for (i = 0; i < nr_ppas; i += pblk->min_write_pgs)
			if (ppa_list[i].g.ch == ppa.g.ch &&
						ppa_list[i].g.lun == ppa.g.lun)
				return 0;

		return 1;
	}

	return 0;
}

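/* Build and submit one emeta write for meta_line: map the next chunk of the
 * emeta buffer into a bio, allocate pages on the line for it, take the target
 * LUN's write semaphore and submit. Once the whole emeta buffer has been
 * mapped, the line is taken off the emeta list so it can be closed when the
 * last chunk syncs.
 */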
int pblk_submit_meta_io(struct pblk *pblk, struct pblk_line *meta_line)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_emeta *emeta = meta_line->emeta;
	struct pblk_g_ctx *m_ctx;
	struct pblk_lun *rlun;
	struct bio *bio;
	struct nvm_rq *rqd;
	void *data;
	u64 paddr;
	int rq_ppas = pblk->min_write_pgs;
	int id = meta_line->id;
	int rq_len;
	int i, j;
	int ret;

	rqd = pblk_alloc_rqd(pblk, READ);
	if (IS_ERR(rqd)) {
		pr_err("pblk: cannot allocate write req.\n");
		return PTR_ERR(rqd);
	}
	m_ctx = nvm_rq_to_pdu(rqd);
	m_ctx->private = meta_line;

	rq_len = rq_ppas * geo->sec_size;
	data = ((void *)emeta->buf) + emeta->mem;

	bio = pblk_bio_map_addr(pblk, data, rq_ppas, rq_len, GFP_KERNEL);
	if (IS_ERR(bio)) {
		ret = PTR_ERR(bio);
		goto fail_free_rqd;
	}
	bio->bi_iter.bi_sector = 0; /* internal bio */
	bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
	rqd->bio = bio;

	ret = pblk_alloc_w_rq(pblk, rqd, rq_ppas, pblk_end_io_write_meta);
	if (ret)
		goto fail_free_bio;

	for (i = 0; i < rqd->nr_ppas; ) {
		spin_lock(&meta_line->lock);
		paddr = __pblk_alloc_page(pblk, meta_line, rq_ppas);
		spin_unlock(&meta_line->lock);
		for (j = 0; j < rq_ppas; j++, i++, paddr++)
			rqd->ppa_list[i] = addr_to_gen_ppa(pblk, paddr, id);
	}

	rlun = &pblk->luns[pblk_ppa_to_pos(geo, rqd->ppa_list[0])];
	ret = down_timeout(&rlun->wr_sem, msecs_to_jiffies(5000));
	if (ret) {
		pr_err("pblk: lun semaphore timed out (%d)\n", ret);
		goto fail_free_bio;
	}

	emeta->mem += rq_len;
	if (emeta->mem >= lm->emeta_len[0]) {
		spin_lock(&l_mg->close_lock);
		list_del(&meta_line->list);
		WARN(!bitmap_full(meta_line->map_bitmap, lm->sec_per_line),
				"pblk: corrupt meta line %d\n", meta_line->id);
		spin_unlock(&l_mg->close_lock);
	}

	ret = pblk_submit_io(pblk, rqd);
	if (ret) {
		pr_err("pblk: emeta I/O submission failed: %d\n", ret);
		goto fail_rollback;
	}

	return NVM_IO_OK;

fail_rollback:
	spin_lock(&l_mg->close_lock);
	pblk_dealloc_page(pblk, meta_line, rq_ppas);
	/* Undo the list_del above if the line was taken off the emeta list */
	if (emeta->mem >= lm->emeta_len[0])
		list_add(&meta_line->list, &l_mg->emeta_list);
	spin_unlock(&l_mg->close_lock);
fail_free_bio:
	bio_put(bio);
fail_free_rqd:
	pblk_free_rqd(pblk, rqd, READ);
	return ret;
}

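/* If a line on the emeta list still has metadata left to write, and writing
 * it now would not collide with the data I/O about to be submitted, issue its
 * next emeta chunk.
 */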
static int pblk_sched_meta_io(struct pblk *pblk, struct ppa_addr *prev_list,
			       int prev_n)
{
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line *meta_line;

	spin_lock(&l_mg->close_lock);
retry:
	if (list_empty(&l_mg->emeta_list)) {
		spin_unlock(&l_mg->close_lock);
		return 0;
	}
	meta_line = list_first_entry(&l_mg->emeta_list, struct pblk_line, list);
	if (bitmap_full(meta_line->map_bitmap, lm->sec_per_line))
		goto retry;
	spin_unlock(&l_mg->close_lock);

	if (!pblk_valid_meta_ppa(pblk, meta_line, prev_list, prev_n))
		return 0;

	return pblk_submit_meta_io(pblk, meta_line);
}

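/* Submit a data write along with the I/O it schedules: an emeta write for a
 * previous line when no erase was mapped, or an erase on the next data line
 * when one was.
 */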
static int pblk_submit_io_set(struct pblk *pblk, struct nvm_rq *rqd)
{
	struct pblk_c_ctx *c_ctx = nvm_rq_to_pdu(rqd);
	struct ppa_addr erase_ppa;
	int err;

	ppa_set_empty(&erase_ppa);

	/* Assign lbas to ppas and populate request structure */
	err = pblk_setup_w_rq(pblk, rqd, c_ctx, &erase_ppa);
	if (err) {
		pr_err("pblk: could not setup write request: %d\n", err);
		return NVM_IO_ERR;
	}

	if (likely(ppa_empty(erase_ppa))) {
		/* Submit metadata write for previous data line */
		err = pblk_sched_meta_io(pblk, rqd->ppa_list, rqd->nr_ppas);
		if (err) {
			pr_err("pblk: metadata I/O submission failed: %d\n",
									err);
			return NVM_IO_ERR;
		}

		/* Submit data write for current data line */
		err = pblk_submit_io(pblk, rqd);
		if (err) {
			pr_err("pblk: data I/O submission failed: %d\n", err);
			return NVM_IO_ERR;
		}
	} else {
		/* Submit data write for current data line */
		err = pblk_submit_io(pblk, rqd);
		if (err) {
			pr_err("pblk: data I/O submission failed: %d\n", err);
			return NVM_IO_ERR;
		}

		/* Submit available erase for next data line */
		if (pblk_blk_erase_async(pblk, erase_ppa)) {
			struct pblk_line *e_line = pblk_line_get_erase(pblk);
			struct nvm_tgt_dev *dev = pblk->dev;
			struct nvm_geo *geo = &dev->geo;
			int bit;

			atomic_inc(&e_line->left_eblks);
			bit = pblk_ppa_to_pos(geo, erase_ppa);
			WARN_ON(!test_and_clear_bit(bit, e_line->erase_bitmap));
		}
	}

	return NVM_IO_OK;
}

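/* Free only the pages added to pad the write bio up to a writable unit;
 * pages behind valid entries belong to the write buffer, not the bio.
 */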
static void pblk_free_write_rqd(struct pblk *pblk, struct nvm_rq *rqd)
{
	struct pblk_c_ctx *c_ctx = nvm_rq_to_pdu(rqd);
	struct bio *bio = rqd->bio;

	if (c_ctx->nr_padded)
		pblk_bio_free_pages(pblk, bio, rqd->nr_ppas, c_ctx->nr_padded);
}

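/* Drain the write buffer once: work out how many sectors can go out as a
 * write unit (padding on a flush if needed), commit that many entries in the
 * buffer, build the write bio and submit the I/O set. Returns 0 if an I/O was
 * submitted, 1 if there was nothing to do.
 */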
static int pblk_submit_write(struct pblk *pblk)
{
	struct bio *bio;
	struct nvm_rq *rqd;
	unsigned int secs_avail, secs_to_sync, secs_to_com;
	unsigned int secs_to_flush;
	unsigned long pos;

	/* If there are no sectors in the cache, flushes (bios without data)
	 * will be cleared on the cache threads
	 */
	secs_avail = pblk_rb_read_count(&pblk->rwb);
	if (!secs_avail)
		return 1;

	secs_to_flush = pblk_rb_sync_point_count(&pblk->rwb);
	if (!secs_to_flush && secs_avail < pblk->min_write_pgs)
		return 1;

	rqd = pblk_alloc_rqd(pblk, WRITE);
	if (IS_ERR(rqd)) {
		pr_err("pblk: cannot allocate write req.\n");
		return 1;
	}

	bio = bio_alloc(GFP_KERNEL, pblk->max_write_pgs);
	if (!bio) {
		pr_err("pblk: cannot allocate write bio\n");
		goto fail_free_rqd;
	}
	bio->bi_iter.bi_sector = 0; /* internal bio */
	bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
	rqd->bio = bio;

	secs_to_sync = pblk_calc_secs_to_sync(pblk, secs_avail, secs_to_flush);
	if (secs_to_sync > pblk->max_write_pgs) {
		pr_err("pblk: bad buffer sync calculation\n");
		goto fail_put_bio;
	}

	secs_to_com = (secs_to_sync > secs_avail) ? secs_avail : secs_to_sync;
	pos = pblk_rb_read_commit(&pblk->rwb, secs_to_com);

	if (pblk_rb_read_to_bio(&pblk->rwb, rqd, bio, pos, secs_to_sync,
								secs_avail)) {
		pr_err("pblk: corrupted write bio\n");
		goto fail_put_bio;
	}

	if (pblk_submit_io_set(pblk, rqd))
		goto fail_free_bio;

#ifdef CONFIG_NVM_DEBUG
	atomic_long_add(secs_to_sync, &pblk->sub_writes);
#endif

	return 0;

fail_free_bio:
	pblk_free_write_rqd(pblk, rqd);
fail_put_bio:
	bio_put(bio);
fail_free_rqd:
	pblk_free_rqd(pblk, rqd, WRITE);

	return 1;
}

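/* Write thread: submit writes for as long as the buffer has work, and go to
 * sleep when a pass submits nothing.
 */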
int pblk_write_ts(void *data)
{
	struct pblk *pblk = data;

	while (!kthread_should_stop()) {
		if (!pblk_submit_write(pblk))
			continue;
		set_current_state(TASK_INTERRUPTIBLE);
		io_schedule();
	}

	return 0;
}