/*
 *  linux/fs/hpfs/buffer.c
 *
 *  Mikulas Patocka (mikulas@artax.karlin.mff.cuni.cz), 1998-1999
 *
 *  general buffer i/o
 */
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include "hpfs_fn.h"

secno hpfs_search_hotfix_map(struct super_block *s, secno sec)
{
	unsigned i;
	struct hpfs_sb_info *sbi = hpfs_sb(s);
	for (i = 0; unlikely(i < sbi->n_hotfixes); i++) {
		if (sbi->hotfix_from[i] == sec) {
			return sbi->hotfix_to[i];
		}
	}
	return sec;
}

unsigned hpfs_search_hotfix_map_for_range(struct super_block *s, secno sec, unsigned n)
{
	unsigned i;
	struct hpfs_sb_info *sbi = hpfs_sb(s);
	for (i = 0; unlikely(i < sbi->n_hotfixes); i++) {
		if (sbi->hotfix_from[i] >= sec && sbi->hotfix_from[i] < sec + n) {
			n = sbi->hotfix_from[i] - sec;
		}
	}
	return n;
}

void hpfs_prefetch_sectors(struct super_block *s, unsigned secno, int n)
{
	struct buffer_head *bh;
	struct blk_plug plug;

	if (n <= 0 || unlikely(secno >= hpfs_sb(s)->sb_fs_size))
		return;

	/* Don't prefetch across a hotfix remap; the sectors are not contiguous there */
	if (unlikely(hpfs_search_hotfix_map_for_range(s, secno, n) != n))
		return;

	/* If the first sector is already cached and up to date, skip the readahead */
	bh = sb_find_get_block(s, secno);
	if (bh) {
		if (buffer_uptodate(bh)) {
			brelse(bh);
			return;
		}
		brelse(bh);
	}

	blk_start_plug(&plug);
	while (n > 0) {
		if (unlikely(secno >= hpfs_sb(s)->sb_fs_size))
			break;
		sb_breadahead(s, secno);
		secno++;
		n--;
	}
	blk_finish_plug(&plug);
}

/* Map a sector into a buffer and return pointers to it and to the buffer. */

void *hpfs_map_sector(struct super_block *s, unsigned secno, struct buffer_head **bhp,
		      int ahead)
{
	struct buffer_head *bh;

	hpfs_prefetch_sectors(s, secno, ahead);

	*bhp = bh = sb_bread(s, hpfs_search_hotfix_map(s, secno));
	if (bh != NULL)
		return bh->b_data;
	else {
		pr_err("%s(): read error\n", __func__);
		return NULL;
	}
}

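/*
 * Hypothetical usage sketch (not part of the original file): a typical
 * single-sector read through hpfs_map_sector. The function name and error
 * value below are illustrative only; real callers live elsewhere in fs/hpfs.
 */
#if 0
static int hpfs_example_read_byte(struct super_block *s, unsigned sec, u8 *out)
{
	struct buffer_head *bh;
	u8 *data = hpfs_map_sector(s, sec, &bh, 0);	/* 0 = no extra readahead */

	if (!data)
		return -EIO;
	*out = data[0];
	brelse(bh);	/* the caller releases the buffer head when done */
	return 0;
}
#endif
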
/* Like hpfs_map_sector but don't read anything */

void *hpfs_get_sector(struct super_block *s, unsigned secno, struct buffer_head **bhp)
{
	struct buffer_head *bh;
	/*return hpfs_map_sector(s, secno, bhp, 0);*/

	if ((*bhp = bh = sb_getblk(s, hpfs_search_hotfix_map(s, secno))) != NULL) {
		if (!buffer_uptodate(bh)) wait_on_buffer(bh);
		set_buffer_uptodate(bh);
		return bh->b_data;
	} else {
		pr_err("%s(): getblk failed\n", __func__);
		return NULL;
	}
}

/* Map 4 sectors into a quad buffer and return pointers to it and to the buffers. */

void *hpfs_map_4sectors(struct super_block *s, unsigned secno, struct quad_buffer_head *qbh,
			int ahead)
{
	char *data;

	if (secno & 3) {
		pr_err("%s(): unaligned read\n", __func__);
		return NULL;
	}

	hpfs_prefetch_sectors(s, secno, 4 + ahead);

	if (!hpfs_map_sector(s, secno + 0, &qbh->bh[0], 0)) goto bail0;
	if (!hpfs_map_sector(s, secno + 1, &qbh->bh[1], 0)) goto bail1;
	if (!hpfs_map_sector(s, secno + 2, &qbh->bh[2], 0)) goto bail2;
	if (!hpfs_map_sector(s, secno + 3, &qbh->bh[3], 0)) goto bail3;

	/* If the four buffers happen to be contiguous in memory, use them directly */
	if (likely(qbh->bh[1]->b_data == qbh->bh[0]->b_data + 1 * 512) &&
	    likely(qbh->bh[2]->b_data == qbh->bh[0]->b_data + 2 * 512) &&
	    likely(qbh->bh[3]->b_data == qbh->bh[0]->b_data + 3 * 512)) {
		return qbh->data = qbh->bh[0]->b_data;
	}

	/* Otherwise copy them into a 2048-byte bounce buffer */
	qbh->data = data = kmalloc(2048, GFP_NOFS);
	if (!data) {
		pr_err("%s(): out of memory\n", __func__);
		goto bail4;
	}

	memcpy(data + 0 * 512, qbh->bh[0]->b_data, 512);
	memcpy(data + 1 * 512, qbh->bh[1]->b_data, 512);
	memcpy(data + 2 * 512, qbh->bh[2]->b_data, 512);
	memcpy(data + 3 * 512, qbh->bh[3]->b_data, 512);

	return data;

 bail4:
	brelse(qbh->bh[3]);
 bail3:
	brelse(qbh->bh[2]);
 bail2:
	brelse(qbh->bh[1]);
 bail1:
	brelse(qbh->bh[0]);
 bail0:
	return NULL;
}

/* Like hpfs_map_4sectors, but don't read the sectors */

void *hpfs_get_4sectors(struct super_block *s, unsigned secno,
			struct quad_buffer_head *qbh)
{
	if (secno & 3) {
		pr_err("%s(): unaligned read\n", __func__);
		return NULL;
	}

	if (!hpfs_get_sector(s, secno + 0, &qbh->bh[0])) goto bail0;
	if (!hpfs_get_sector(s, secno + 1, &qbh->bh[1])) goto bail1;
	if (!hpfs_get_sector(s, secno + 2, &qbh->bh[2])) goto bail2;
	if (!hpfs_get_sector(s, secno + 3, &qbh->bh[3])) goto bail3;

	if (likely(qbh->bh[1]->b_data == qbh->bh[0]->b_data + 1 * 512) &&
	    likely(qbh->bh[2]->b_data == qbh->bh[0]->b_data + 2 * 512) &&
	    likely(qbh->bh[3]->b_data == qbh->bh[0]->b_data + 3 * 512)) {
		return qbh->data = qbh->bh[0]->b_data;
	}

	if (!(qbh->data = kmalloc(2048, GFP_NOFS))) {
		pr_err("%s(): out of memory\n", __func__);
		goto bail4;
	}
	return qbh->data;

 bail4:
	brelse(qbh->bh[3]);
 bail3:
	brelse(qbh->bh[2]);
 bail2:
	brelse(qbh->bh[1]);
 bail1:
	brelse(qbh->bh[0]);
 bail0:
	return NULL;
}

void hpfs_brelse4(struct quad_buffer_head *qbh)
{
	/* If a bounce buffer was used, free it before releasing the buffer heads */
	if (unlikely(qbh->data != qbh->bh[0]->b_data))
		kfree(qbh->data);
	brelse(qbh->bh[0]);
	brelse(qbh->bh[1]);
	brelse(qbh->bh[2]);
	brelse(qbh->bh[3]);
}

void hpfs_mark_4buffers_dirty(struct quad_buffer_head *qbh)
{
	/* If a bounce buffer was used, copy the data back into the buffer heads */
	if (unlikely(qbh->data != qbh->bh[0]->b_data)) {
		memcpy(qbh->bh[0]->b_data, qbh->data + 0 * 512, 512);
		memcpy(qbh->bh[1]->b_data, qbh->data + 1 * 512, 512);
		memcpy(qbh->bh[2]->b_data, qbh->data + 2 * 512, 512);
		memcpy(qbh->bh[3]->b_data, qbh->data + 3 * 512, 512);
	}
	mark_buffer_dirty(qbh->bh[0]);
	mark_buffer_dirty(qbh->bh[1]);
	mark_buffer_dirty(qbh->bh[2]);
	mark_buffer_dirty(qbh->bh[3]);
}

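/*
 * Hypothetical usage sketch (not part of the original file): the quad-buffer
 * helpers above are used as a map / modify / mark dirty / release cycle.
 * hpfs_example_update is an illustrative name, not an existing HPFS function.
 */
#if 0
static int hpfs_example_update(struct super_block *s, secno sec)
{
	struct quad_buffer_head qbh;
	char *data;

	data = hpfs_map_4sectors(s, sec, &qbh, 0);	/* sec must be 4-sector aligned */
	if (!data)
		return -EIO;
	data[0] ^= 1;				/* modify the 2048-byte view */
	hpfs_mark_4buffers_dirty(&qbh);		/* copy back (if bounced) and dirty all four bh's */
	hpfs_brelse4(&qbh);			/* free the bounce buffer and release the bh's */
	return 0;
}
#endif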