logfs: query block device for number of pages to send with bio
/*
 * fs/logfs/dev_bdev.c  - Device access methods for block devices
 *
 * As should be obvious for Linux kernel code, license is GPLv2
 *
 * Copyright (c) 2005-2008 Joern Engel <joern@logfs.org>
 */
#include "logfs.h"
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/gfp.h>
#include <linux/prefetch.h>

#define PAGE_OFS(ofs) ((ofs) & (PAGE_SIZE-1))

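/*
 * Completion handler shared by the synchronous requests below: signal
 * the submitter, which sleeps on a completion on its own stack.
 */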
static void request_complete(struct bio *bio, int err)
{
        complete((struct completion *)bio->bi_private);
}

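/*
 * Read or write a single page synchronously.  Bio and bio_vec live on
 * the stack, so no allocation is needed; we submit, wait for the
 * request to complete and map any failure to -EIO.
 */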
static int sync_request(struct page *page, struct block_device *bdev, int rw)
{
        struct bio bio;
        struct bio_vec bio_vec;
        struct completion complete;

        bio_init(&bio);
        bio.bi_max_vecs = 1;
        bio.bi_io_vec = &bio_vec;
        bio_vec.bv_page = page;
        bio_vec.bv_len = PAGE_SIZE;
        bio_vec.bv_offset = 0;
        bio.bi_vcnt = 1;
        bio.bi_idx = 0;
        bio.bi_size = PAGE_SIZE;
        bio.bi_bdev = bdev;
        bio.bi_sector = page->index * (PAGE_SIZE >> 9);
        init_completion(&complete);
        bio.bi_private = &complete;
        bio.bi_end_io = request_complete;

        submit_bio(rw, &bio);
        wait_for_completion(&complete);
        return test_bit(BIO_UPTODATE, &bio.bi_flags) ? 0 : -EIO;
}

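/*
 * Page filler for the logfs mapping inode; also serves as the
 * ->readpage device operation.  Reads the page synchronously and sets
 * the Uptodate/Error bits to match the result.
 */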
static int bdev_readpage(void *_sb, struct page *page)
{
        struct super_block *sb = _sb;
        struct block_device *bdev = logfs_super(sb)->s_bdev;
        int err;

        err = sync_request(page, bdev, READ);
        if (err) {
                ClearPageUptodate(page);
                SetPageError(page);
        } else {
                SetPageUptodate(page);
                ClearPageError(page);
        }
        unlock_page(page);
        return err;
}

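/* Woken when the last pending write completes; see bdev_sync(). */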
static DECLARE_WAIT_QUEUE_HEAD(wq);

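/*
 * Completion handler for asynchronous segment writes.  Walks the bio's
 * pages from back to front, ending writeback and dropping the reference
 * taken by find_lock_page() in __bdev_writeseg(), then wakes any waiter
 * in bdev_sync() once no writes remain pending.
 */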
static void writeseg_end_io(struct bio *bio, int err)
{
        const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
        struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
        struct super_block *sb = bio->bi_private;
        struct logfs_super *super = logfs_super(sb);
        struct page *page;

        BUG_ON(!uptodate); /* FIXME: Retry io or write elsewhere */
        BUG_ON(err);
        BUG_ON(bio->bi_vcnt == 0);
        do {
                page = bvec->bv_page;
                if (--bvec >= bio->bi_io_vec)
                        prefetchw(&bvec->bv_page->flags);

                end_page_writeback(page);
                page_cache_release(page);
        } while (bvec >= bio->bi_io_vec);
        bio_put(bio);
        if (atomic_dec_and_test(&super->s_pending_writes))
                wake_up(&wq);
}

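/*
 * Write nr_pages consecutive pages of the mapping inode to the device,
 * starting at byte offset ofs.  The block layer cannot split bios, so
 * when the range exceeds what one bio can carry, full bios are
 * submitted in a loop and the bookkeeping restarts with a fresh bio.
 */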
static int __bdev_writeseg(struct super_block *sb, u64 ofs, pgoff_t index,
                size_t nr_pages)
{
        struct logfs_super *super = logfs_super(sb);
        struct address_space *mapping = super->s_mapping_inode->i_mapping;
        struct bio *bio;
        struct page *page;
        unsigned int max_pages;
        int i;

        max_pages = min(nr_pages, (size_t) bio_get_nr_vecs(super->s_bdev));

        bio = bio_alloc(GFP_NOFS, max_pages);
        BUG_ON(!bio);

        for (i = 0; i < nr_pages; i++) {
                if (i >= max_pages) {
                        /* Block layer cannot split bios :( */
                        bio->bi_vcnt = i;
                        bio->bi_idx = 0;
                        bio->bi_size = i * PAGE_SIZE;
                        bio->bi_bdev = super->s_bdev;
                        bio->bi_sector = ofs >> 9;
                        bio->bi_private = sb;
                        bio->bi_end_io = writeseg_end_io;
                        atomic_inc(&super->s_pending_writes);
                        submit_bio(WRITE, bio);

                        ofs += i * PAGE_SIZE;
                        index += i;
                        nr_pages -= i;
                        i = 0;

                        bio = bio_alloc(GFP_NOFS, max_pages);
                        BUG_ON(!bio);
                }
                page = find_lock_page(mapping, index + i);
                BUG_ON(!page);
                bio->bi_io_vec[i].bv_page = page;
                bio->bi_io_vec[i].bv_len = PAGE_SIZE;
                bio->bi_io_vec[i].bv_offset = 0;

                BUG_ON(PageWriteback(page));
                set_page_writeback(page);
                unlock_page(page);
        }
        bio->bi_vcnt = nr_pages;
        bio->bi_idx = 0;
        bio->bi_size = nr_pages * PAGE_SIZE;
        bio->bi_bdev = super->s_bdev;
        bio->bi_sector = ofs >> 9;
        bio->bi_private = sb;
        bio->bi_end_io = writeseg_end_io;
        atomic_inc(&super->s_pending_writes);
        submit_bio(WRITE, bio);
        return 0;
}

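/*
 * Round a write out to whole pages: align the start offset down and the
 * length up, then hand the page-aligned region to __bdev_writeseg().
 */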
static void bdev_writeseg(struct super_block *sb, u64 ofs, size_t len)
{
        struct logfs_super *super = logfs_super(sb);
        int head;

        BUG_ON(super->s_flags & LOGFS_SB_FLAG_RO);

        if (len == 0) {
                /* This can happen when an object fits perfectly into a
                 * segment: the segment is written out on sync and
                 * subsequently closed.
                 */
                return;
        }
        head = ofs & (PAGE_SIZE - 1);
        if (head) {
                ofs -= head;
                len += head;
        }
        len = PAGE_ALIGN(len);
        __bdev_writeseg(sb, ofs, ofs >> PAGE_SHIFT, len >> PAGE_SHIFT);
}

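/*
 * Completion handler for erase writes: pure accounting, as the bio only
 * carries the shared erase page and there is no writeback to end.
 */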
static void erase_end_io(struct bio *bio, int err)
{
        const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
        struct super_block *sb = bio->bi_private;
        struct logfs_super *super = logfs_super(sb);

        BUG_ON(!uptodate); /* FIXME: Retry io or write elsewhere */
        BUG_ON(err);
        BUG_ON(bio->bi_vcnt == 0);
        bio_put(bio);
        if (atomic_dec_and_test(&super->s_pending_writes))
                wake_up(&wq);
}

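/*
 * Emulate an erase on a block device by writing the superblock's shared
 * erase page over every page of the region, using the same bio-chunking
 * scheme as __bdev_writeseg().
 */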
static int do_erase(struct super_block *sb, u64 ofs, pgoff_t index,
                size_t nr_pages)
{
        struct logfs_super *super = logfs_super(sb);
        struct bio *bio;
        unsigned int max_pages;
        int i;

        max_pages = min(nr_pages, (size_t) bio_get_nr_vecs(super->s_bdev));

        bio = bio_alloc(GFP_NOFS, max_pages);
        BUG_ON(!bio);

        for (i = 0; i < nr_pages; i++) {
                if (i >= max_pages) {
                        /* Block layer cannot split bios :( */
                        bio->bi_vcnt = i;
                        bio->bi_idx = 0;
                        bio->bi_size = i * PAGE_SIZE;
                        bio->bi_bdev = super->s_bdev;
                        bio->bi_sector = ofs >> 9;
                        bio->bi_private = sb;
                        bio->bi_end_io = erase_end_io;
                        atomic_inc(&super->s_pending_writes);
                        submit_bio(WRITE, bio);

                        ofs += i * PAGE_SIZE;
                        index += i;
                        nr_pages -= i;
                        i = 0;

                        bio = bio_alloc(GFP_NOFS, max_pages);
                        BUG_ON(!bio);
                }
                bio->bi_io_vec[i].bv_page = super->s_erase_page;
                bio->bi_io_vec[i].bv_len = PAGE_SIZE;
                bio->bi_io_vec[i].bv_offset = 0;
        }
        bio->bi_vcnt = nr_pages;
        bio->bi_idx = 0;
        bio->bi_size = nr_pages * PAGE_SIZE;
        bio->bi_bdev = super->s_bdev;
        bio->bi_sector = ofs >> 9;
        bio->bi_private = sb;
        bio->bi_end_io = erase_end_io;
        atomic_inc(&super->s_pending_writes);
        submit_bio(WRITE, bio);
        return 0;
}

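/*
 * Erase a page-aligned region.  Block devices have no real erase
 * operation, so the erase pattern is only written out when the caller
 * insists on it via ensure_write.
 */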
static int bdev_erase(struct super_block *sb, loff_t to, size_t len,
                int ensure_write)
{
        struct logfs_super *super = logfs_super(sb);

        BUG_ON(to & (PAGE_SIZE - 1));
        BUG_ON(len & (PAGE_SIZE - 1));

        if (super->s_flags & LOGFS_SB_FLAG_RO)
                return -EROFS;

        if (ensure_write) {
                /*
                 * Object store doesn't care whether erases happen or not.
                 * But for the journal they are required.  Otherwise a scan
                 * can find an old commit entry and assume it is the current
                 * one, travelling back in time.
                 */
                do_erase(sb, to, to >> PAGE_SHIFT, len >> PAGE_SHIFT);
        }

        return 0;
}

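/* Wait until every write submitted through this backend has completed. */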
static void bdev_sync(struct super_block *sb)
{
        struct logfs_super *super = logfs_super(sb);

        wait_event(wq, atomic_read(&super->s_pending_writes) == 0);
}

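/* The first superblock occupies the first page of the device. */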
static struct page *bdev_find_first_sb(struct super_block *sb, u64 *ofs)
{
        struct logfs_super *super = logfs_super(sb);
        struct address_space *mapping = super->s_mapping_inode->i_mapping;
        filler_t *filler = bdev_readpage;

        *ofs = 0;
        return read_cache_page(mapping, 0, filler, sb);
}

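/*
 * The second superblock lives in the last complete 4KiB-aligned block
 * of the device: round the device size down to a 4KiB boundary, then
 * step back one block so a partial tail block is never used.
 */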
static struct page *bdev_find_last_sb(struct super_block *sb, u64 *ofs)
{
        struct logfs_super *super = logfs_super(sb);
        struct address_space *mapping = super->s_mapping_inode->i_mapping;
        filler_t *filler = bdev_readpage;
        u64 pos = (super->s_bdev->bd_inode->i_size & ~0xfffULL) - 0x1000;
        pgoff_t index = pos >> PAGE_SHIFT;

        *ofs = pos;
        return read_cache_page(mapping, index, filler, sb);
}

static int bdev_write_sb(struct super_block *sb, struct page *page)
{
        struct block_device *bdev = logfs_super(sb)->s_bdev;

        /* Nothing special to do for block devices. */
        return sync_request(page, bdev, WRITE);
}

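/* Drop the exclusive reference taken in logfs_get_sb_bdev(). */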
static void bdev_put_device(struct logfs_super *s)
{
        blkdev_put(s->s_bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
}

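/*
 * Block devices can rewrite any sector at will, so continuing a
 * buffered write is always fine; only flash needs the extra checks.
 */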
static int bdev_can_write_buf(struct super_block *sb, u64 ofs)
{
        return 0;
}

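/* Device operations handed to the logfs core for block device mounts. */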
static const struct logfs_device_ops bd_devops = {
        .find_first_sb  = bdev_find_first_sb,
        .find_last_sb   = bdev_find_last_sb,
        .write_sb       = bdev_write_sb,
        .readpage       = bdev_readpage,
        .writeseg       = bdev_writeseg,
        .erase          = bdev_erase,
        .can_write_buf  = bdev_can_write_buf,
        .sync           = bdev_sync,
        .put_device     = bdev_put_device,
};

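/*
 * Open the named block device exclusively for logfs.  mtdblock devices
 * are handed over to the MTD backend instead, which can use real erase
 * operations.
 */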
int logfs_get_sb_bdev(struct logfs_super *p, struct file_system_type *type,
                const char *devname)
{
        struct block_device *bdev;

        bdev = blkdev_get_by_path(devname, FMODE_READ|FMODE_WRITE|FMODE_EXCL,
                                  type);
        if (IS_ERR(bdev))
                return PTR_ERR(bdev);

        if (MAJOR(bdev->bd_dev) == MTD_BLOCK_MAJOR) {
                int mtdnr = MINOR(bdev->bd_dev);
                blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
                return logfs_get_sb_mtd(p, mtdnr);
        }

        p->s_bdev = bdev;
        p->s_mtd = NULL;
        p->s_devops = &bd_devops;
        return 0;
}