/*
 * block/blk-lib.c - generic block layer helper functions
 * (karo-tx-linux.git, patch: "block: prep work for batch completion")
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>

#include "blk.h"

struct bio_batch {
        atomic_t                done;
        unsigned long           flags;
        struct completion       *wait;
};

static void bio_batch_end_io(struct bio *bio, int err,
                             struct batch_complete *batch)
{
        struct bio_batch *bb = bio->bi_private;

        if (err && (err != -EOPNOTSUPP))
                clear_bit(BIO_UPTODATE, &bb->flags);
        if (atomic_dec_and_test(&bb->done))
                complete(bb->wait);
        bio_put(bio);
}

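/*
 * Usage pattern (sketch, mirroring the callers below): bb.done starts
 * at 1 so the batch cannot complete while bios are still being issued;
 * each submitted bio takes a reference, and the submitter drops the
 * initial one itself before deciding whether to sleep:
 *
 *      atomic_set(&bb.done, 1);
 *      ...
 *      atomic_inc(&bb.done);
 *      submit_bio(type, bio);
 *      ...
 *      if (!atomic_dec_and_test(&bb.done))
 *              wait_for_completion_io(&wait);
 */
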
/**
 * blkdev_issue_discard - queue a discard
 * @bdev:       blockdev to issue discard for
 * @sector:     start sector
 * @nr_sects:   number of sectors to discard
 * @gfp_mask:   memory allocation flags (for bio_alloc)
 * @flags:      BLKDEV_DISCARD_* flags to control behaviour
 *
 * Description:
 *    Issue a discard request for the sectors in question.
 */
int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
                sector_t nr_sects, gfp_t gfp_mask, unsigned long flags)
{
        DECLARE_COMPLETION_ONSTACK(wait);
        struct request_queue *q = bdev_get_queue(bdev);
        int type = REQ_WRITE | REQ_DISCARD;
        sector_t max_discard_sectors;
        sector_t granularity, alignment;
        struct bio_batch bb;
        struct bio *bio;
        int ret = 0;
        struct blk_plug plug;

        if (!q)
                return -ENXIO;

        if (!blk_queue_discard(q))
                return -EOPNOTSUPP;

        /* Zero-sector (unknown) and one-sector granularities are the same. */
        granularity = max(q->limits.discard_granularity >> 9, 1U);
        alignment = bdev_discard_alignment(bdev) >> 9;
        alignment = sector_div(alignment, granularity);

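        /*
         * alignment now holds the sector offset, within one granule, that
         * every request boundary must preserve. Illustrative example: with
         * a 1 MiB granularity (2048 sectors) and a partition that starts
         * 63 sectors into a granule, alignment ends up as 63.
         */
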
        /*
         * Ensure that max_discard_sectors is of the proper
         * granularity, so that requests stay aligned after a split.
         */
        max_discard_sectors = min(q->limits.max_discard_sectors, UINT_MAX >> 9);
        sector_div(max_discard_sectors, granularity);
        max_discard_sectors *= granularity;
        if (unlikely(!max_discard_sectors)) {
                /* Avoid infinite loop below. Being cautious never hurts. */
                return -EOPNOTSUPP;
        }
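        /*
         * Illustrative example of the rounding above: a queue limit of
         * 65535 sectors with a granularity of 2048 is rounded down to
         * 31 * 2048 = 63488 sectors.
         */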

        if (flags & BLKDEV_DISCARD_SECURE) {
                if (!blk_queue_secdiscard(q))
                        return -EOPNOTSUPP;
                type |= REQ_SECURE;
        }

        atomic_set(&bb.done, 1);
        bb.flags = 1 << BIO_UPTODATE;
        bb.wait = &wait;

        blk_start_plug(&plug);
        while (nr_sects) {
                unsigned int req_sects;
                sector_t end_sect, tmp;

                bio = bio_alloc(gfp_mask, 1);
                if (!bio) {
                        ret = -ENOMEM;
                        break;
                }

                req_sects = min_t(sector_t, nr_sects, max_discard_sectors);

                /*
                 * If splitting a request, and the next starting sector would be
                 * misaligned, stop the discard at the previous aligned sector.
                 */
                end_sect = sector + req_sects;
                tmp = end_sect;
                if (req_sects < nr_sects &&
                    sector_div(tmp, granularity) != alignment) {
                        end_sect = end_sect - alignment;
                        sector_div(end_sect, granularity);
                        end_sect = end_sect * granularity + alignment;
                        req_sects = end_sect - sector;
                }
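                /*
                 * Worked example of the round-down above (illustrative):
                 * with granularity 8 and alignment 2, a tentative end_sect
                 * of 27 is pulled back to 3 * 8 + 2 = 26, so the next bio
                 * starts on an aligned boundary.
                 */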

                bio->bi_sector = sector;
                bio->bi_end_io = bio_batch_end_io;
                bio->bi_bdev = bdev;
                bio->bi_private = &bb;

                bio->bi_size = req_sects << 9;
                nr_sects -= req_sects;
                sector = end_sect;

                atomic_inc(&bb.done);
                submit_bio(type, bio);
        }
        blk_finish_plug(&plug);

        /* Wait for bios in-flight */
        if (!atomic_dec_and_test(&bb.done))
                wait_for_completion_io(&wait);

        if (!test_bit(BIO_UPTODATE, &bb.flags))
                ret = -EIO;

        return ret;
}
EXPORT_SYMBOL(blkdev_issue_discard);

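/*
 * Example caller (hypothetical, for illustration): discard the first
 * 8 MiB of a device, allowing the allocator to sleep. Passing
 * BLKDEV_DISCARD_SECURE in flags would request a secure discard
 * instead, provided the queue supports it:
 *
 *      int err = blkdev_issue_discard(bdev, 0, 8 << 11, GFP_KERNEL, 0);
 *      if (err)
 *              pr_warn("discard failed: %d\n", err);
 */
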
/**
 * blkdev_issue_write_same - queue a write same operation
 * @bdev:       target blockdev
 * @sector:     start sector
 * @nr_sects:   number of sectors to write
 * @gfp_mask:   memory allocation flags (for bio_alloc)
 * @page:       page containing data to write
 *
 * Description:
 *    Issue a write same request for the sectors in question.
 */
int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
                            sector_t nr_sects, gfp_t gfp_mask,
                            struct page *page)
{
        DECLARE_COMPLETION_ONSTACK(wait);
        struct request_queue *q = bdev_get_queue(bdev);
        unsigned int max_write_same_sectors;
        struct bio_batch bb;
        struct bio *bio;
        int ret = 0;

        if (!q)
                return -ENXIO;

        max_write_same_sectors = q->limits.max_write_same_sectors;

        if (max_write_same_sectors == 0)
                return -EOPNOTSUPP;

        atomic_set(&bb.done, 1);
        bb.flags = 1 << BIO_UPTODATE;
        bb.wait = &wait;

        while (nr_sects) {
                bio = bio_alloc(gfp_mask, 1);
                if (!bio) {
                        ret = -ENOMEM;
                        break;
                }

                bio->bi_sector = sector;
                bio->bi_end_io = bio_batch_end_io;
                bio->bi_bdev = bdev;
                bio->bi_private = &bb;
                bio->bi_vcnt = 1;
                bio->bi_io_vec->bv_page = page;
                bio->bi_io_vec->bv_offset = 0;
                bio->bi_io_vec->bv_len = bdev_logical_block_size(bdev);
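                /*
                 * Note (editorial): WRITE SAME carries a single logical
                 * block of payload that the device replicates across the
                 * whole range, which is why the bvec stays one block long
                 * while bi_size below covers the full extent.
                 */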

                if (nr_sects > max_write_same_sectors) {
                        bio->bi_size = max_write_same_sectors << 9;
                        nr_sects -= max_write_same_sectors;
                        sector += max_write_same_sectors;
                } else {
                        bio->bi_size = nr_sects << 9;
                        nr_sects = 0;
                }

                atomic_inc(&bb.done);
                submit_bio(REQ_WRITE | REQ_WRITE_SAME, bio);
        }

        /* Wait for bios in-flight */
        if (!atomic_dec_and_test(&bb.done))
                wait_for_completion_io(&wait);

        if (!test_bit(BIO_UPTODATE, &bb.flags))
                ret = -ENOTSUPP;

        return ret;
}
EXPORT_SYMBOL(blkdev_issue_write_same);

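/*
 * Example caller: blkdev_issue_zeroout() below uses this helper with
 * ZERO_PAGE(0) so that a supporting device can zero a whole range in
 * one device-side operation:
 *
 *      if (!blkdev_issue_write_same(bdev, sector, nr_sects, gfp_mask,
 *                                   ZERO_PAGE(0)))
 *              return 0;
 */
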
/**
 * __blkdev_issue_zeroout - generate a number of zero-filled write bios
 * @bdev:       blockdev to issue against
 * @sector:     start sector
 * @nr_sects:   number of sectors to write
 * @gfp_mask:   memory allocation flags (for bio_alloc)
 *
 * Description:
 *    Generate and issue a number of bios with zero-filled pages.
 */
int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
                        sector_t nr_sects, gfp_t gfp_mask)
{
        int ret;
        struct bio *bio;
        struct bio_batch bb;
        unsigned int sz;
        DECLARE_COMPLETION_ONSTACK(wait);

        atomic_set(&bb.done, 1);
        bb.flags = 1 << BIO_UPTODATE;
        bb.wait = &wait;

        ret = 0;
        while (nr_sects != 0) {
                bio = bio_alloc(gfp_mask,
                                min(nr_sects, (sector_t)BIO_MAX_PAGES));
                if (!bio) {
                        ret = -ENOMEM;
                        break;
                }

                bio->bi_sector = sector;
                bio->bi_bdev   = bdev;
                bio->bi_end_io = bio_batch_end_io;
                bio->bi_private = &bb;

                while (nr_sects != 0) {
                        sz = min((sector_t)PAGE_SIZE >> 9, nr_sects);
                        /* bio_add_page() returns the number of bytes added */
                        ret = bio_add_page(bio, ZERO_PAGE(0), sz << 9, 0);
                        nr_sects -= ret >> 9;
                        sector += ret >> 9;
                        if (ret < (sz << 9))
                                break;
                }
                ret = 0;
                atomic_inc(&bb.done);
                submit_bio(WRITE, bio);
        }

        /* Wait for bios in-flight */
        if (!atomic_dec_and_test(&bb.done))
                wait_for_completion_io(&wait);

        if (!test_bit(BIO_UPTODATE, &bb.flags))
                /* One of the bios in the batch completed with an error. */
                ret = -EIO;

        return ret;
}

/**
 * blkdev_issue_zeroout - zero-fill a block range
 * @bdev:       blockdev to write
 * @sector:     start sector
 * @nr_sects:   number of sectors to write
 * @gfp_mask:   memory allocation flags (for bio_alloc)
 *
 * Description:
 *    Zero-fill the block range. Use WRITE SAME with a zeroed page when
 *    the device supports it, and fall back to manually issuing
 *    zero-filled bios otherwise.
 */
int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
                         sector_t nr_sects, gfp_t gfp_mask)
{
        if (bdev_write_same(bdev)) {
                unsigned char bdn[BDEVNAME_SIZE];

                if (!blkdev_issue_write_same(bdev, sector, nr_sects, gfp_mask,
                                             ZERO_PAGE(0)))
                        return 0;

                bdevname(bdev, bdn);
                pr_err("%s: WRITE SAME failed. Manually zeroing.\n", bdn);
        }

        return __blkdev_issue_zeroout(bdev, sector, nr_sects, gfp_mask);
}
EXPORT_SYMBOL(blkdev_issue_zeroout);
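
/*
 * Example caller (hypothetical, for illustration): zero the first 1 MiB
 * (2048 sectors) of a device from process context:
 *
 *      int err = blkdev_issue_zeroout(bdev, 0, 2048, GFP_KERNEL);
 *      if (err)
 *              pr_warn("zeroout failed: %d\n", err);
 */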