]> git.karo-electronics.de Git - karo-tx-linux.git/blob - fs/ufs/balloc.c
ufs: fix reserved blocks check
[karo-tx-linux.git] / fs / ufs / balloc.c
1 /*
2  *  linux/fs/ufs/balloc.c
3  *
4  * Copyright (C) 1998
5  * Daniel Pirkl <daniel.pirkl@email.cz>
6  * Charles University, Faculty of Mathematics and Physics
7  *
8  * UFS2 write support Evgeniy Dushistov <dushistov@mail.ru>, 2007
9  */
10
11 #include <linux/fs.h>
12 #include <linux/stat.h>
13 #include <linux/time.h>
14 #include <linux/string.h>
15 #include <linux/buffer_head.h>
16 #include <linux/capability.h>
17 #include <linux/bitops.h>
18 #include <linux/bio.h>
19 #include <asm/byteorder.h>
20
21 #include "ufs_fs.h"
22 #include "ufs.h"
23 #include "swab.h"
24 #include "util.h"
25
26 #define INVBLOCK ((u64)-1L)
27
28 static u64 ufs_add_fragments(struct inode *, u64, unsigned, unsigned);
29 static u64 ufs_alloc_fragments(struct inode *, unsigned, u64, unsigned, int *);
30 static u64 ufs_alloccg_block(struct inode *, struct ufs_cg_private_info *, u64, int *);
31 static u64 ufs_bitmap_search (struct super_block *, struct ufs_cg_private_info *, u64, unsigned);
32 static unsigned char ufs_fragtable_8fpb[], ufs_fragtable_other[];
33 static void ufs_clusteracct(struct super_block *, struct ufs_cg_private_info *, unsigned, int);
34
35 /*
36  * Free 'count' fragments from fragment number 'fragment'
37  */
38 void ufs_free_fragments(struct inode *inode, u64 fragment, unsigned count)
39 {
40         struct super_block * sb;
41         struct ufs_sb_private_info * uspi;
42         struct ufs_cg_private_info * ucpi;
43         struct ufs_cylinder_group * ucg;
44         unsigned cgno, bit, end_bit, bbase, blkmap, i;
45         u64 blkno;
46         
47         sb = inode->i_sb;
48         uspi = UFS_SB(sb)->s_uspi;
49         
50         UFSD("ENTER, fragment %llu, count %u\n",
51              (unsigned long long)fragment, count);
52         
53         if (ufs_fragnum(fragment) + count > uspi->s_fpg)
54                 ufs_error (sb, "ufs_free_fragments", "internal error");
55
56         mutex_lock(&UFS_SB(sb)->s_lock);
57         
58         cgno = ufs_dtog(uspi, fragment);
59         bit = ufs_dtogd(uspi, fragment);
60         if (cgno >= uspi->s_ncg) {
61                 ufs_panic (sb, "ufs_free_fragments", "freeing blocks are outside device");
62                 goto failed;
63         }
64                 
65         ucpi = ufs_load_cylinder (sb, cgno);
66         if (!ucpi) 
67                 goto failed;
68         ucg = ubh_get_ucg (UCPI_UBH(ucpi));
69         if (!ufs_cg_chkmagic(sb, ucg)) {
70                 ufs_panic (sb, "ufs_free_fragments", "internal error, bad magic number on cg %u", cgno);
71                 goto failed;
72         }
73
74         end_bit = bit + count;
75         bbase = ufs_blknum (bit);
76         blkmap = ubh_blkmap (UCPI_UBH(ucpi), ucpi->c_freeoff, bbase);
77         ufs_fragacct (sb, blkmap, ucg->cg_frsum, -1);
78         for (i = bit; i < end_bit; i++) {
79                 if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_freeoff, i))
80                         ubh_setbit (UCPI_UBH(ucpi), ucpi->c_freeoff, i);
81                 else 
82                         ufs_error (sb, "ufs_free_fragments",
83                                    "bit already cleared for fragment %u", i);
84         }
85
86         inode_sub_bytes(inode, count << uspi->s_fshift);
87         fs32_add(sb, &ucg->cg_cs.cs_nffree, count);
88         uspi->cs_total.cs_nffree += count;
89         fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
90         blkmap = ubh_blkmap (UCPI_UBH(ucpi), ucpi->c_freeoff, bbase);
91         ufs_fragacct(sb, blkmap, ucg->cg_frsum, 1);
92
93         /*
94          * Trying to reassemble free fragments into block
95          */
96         blkno = ufs_fragstoblks (bbase);
97         if (ubh_isblockset(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno)) {
98                 fs32_sub(sb, &ucg->cg_cs.cs_nffree, uspi->s_fpb);
99                 uspi->cs_total.cs_nffree -= uspi->s_fpb;
100                 fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, uspi->s_fpb);
101                 if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
102                         ufs_clusteracct (sb, ucpi, blkno, 1);
103                 fs32_add(sb, &ucg->cg_cs.cs_nbfree, 1);
104                 uspi->cs_total.cs_nbfree++;
105                 fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nbfree, 1);
106                 if (uspi->fs_magic != UFS2_MAGIC) {
107                         unsigned cylno = ufs_cbtocylno (bbase);
108
109                         fs16_add(sb, &ubh_cg_blks(ucpi, cylno,
110                                                   ufs_cbtorpos(bbase)), 1);
111                         fs32_add(sb, &ubh_cg_blktot(ucpi, cylno), 1);
112                 }
113         }
114         
115         ubh_mark_buffer_dirty (USPI_UBH(uspi));
116         ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
117         if (sb->s_flags & MS_SYNCHRONOUS)
118                 ubh_sync_block(UCPI_UBH(ucpi));
119         ufs_mark_sb_dirty(sb);
120
121         mutex_unlock(&UFS_SB(sb)->s_lock);
122         UFSD("EXIT\n");
123         return;
124
125 failed:
126         mutex_unlock(&UFS_SB(sb)->s_lock);
127         UFSD("EXIT (FAILED)\n");
128         return;
129 }
130
/*
 * Free 'count' fragments from fragment number 'fragment' (free whole blocks).
 * Both 'fragment' and 'count' must be block-aligned; the range may span
 * more than one cylinder group.
 */
void ufs_free_blocks(struct inode *inode, u64 fragment, unsigned count)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_cg_private_info * ucpi;
	struct ufs_cylinder_group * ucg;
	unsigned overflow, cgno, bit, end_bit, i;
	u64 blkno;
	
	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;

	UFSD("ENTER, fragment %llu, count %u\n",
	     (unsigned long long)fragment, count);
	
	/* whole blocks only: start and length must be multiples of s_fpb */
	if ((fragment & uspi->s_fpbmask) || (count & uspi->s_fpbmask)) {
		ufs_error (sb, "ufs_free_blocks", "internal error, "
			   "fragment %llu, count %u\n",
			   (unsigned long long)fragment, count);
		goto failed;
	}

	mutex_lock(&UFS_SB(sb)->s_lock);
	
do_more:
	overflow = 0;
	cgno = ufs_dtog(uspi, fragment);
	bit = ufs_dtogd(uspi, fragment);
	if (cgno >= uspi->s_ncg) {
		ufs_panic (sb, "ufs_free_blocks", "freeing blocks are outside device");
		goto failed_unlock;
	}
	end_bit = bit + count;
	/* clamp the run to this cylinder group; the spill-over is handled
	 * by looping back to do_more with the remainder */
	if (end_bit > uspi->s_fpg) {
		overflow = bit + count - uspi->s_fpg;
		count -= overflow;
		end_bit -= overflow;
	}

	ucpi = ufs_load_cylinder (sb, cgno);
	if (!ucpi) 
		goto failed_unlock;
	ucg = ubh_get_ucg (UCPI_UBH(ucpi));
	if (!ufs_cg_chkmagic(sb, ucg)) {
		ufs_panic (sb, "ufs_free_blocks", "internal error, bad magic number on cg %u", cgno);
		goto failed_unlock;
	}

	/* free one whole block (s_fpb fragments) per iteration */
	for (i = bit; i < end_bit; i += uspi->s_fpb) {
		blkno = ufs_fragstoblks(i);
		if (ubh_isblockset(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno)) {
			ufs_error(sb, "ufs_free_blocks", "freeing free fragment");
		}
		ubh_setblock(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno);
		inode_sub_bytes(inode, uspi->s_fpb << uspi->s_fshift);
		if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
			ufs_clusteracct (sb, ucpi, blkno, 1);

		fs32_add(sb, &ucg->cg_cs.cs_nbfree, 1);
		uspi->cs_total.cs_nbfree++;
		fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nbfree, 1);

		if (uspi->fs_magic != UFS2_MAGIC) {
			/* UFS1 also maintains per-cylinder rotational stats */
			unsigned cylno = ufs_cbtocylno(i);

			fs16_add(sb, &ubh_cg_blks(ucpi, cylno,
						  ufs_cbtorpos(i)), 1);
			fs32_add(sb, &ubh_cg_blktot(ucpi, cylno), 1);
		}
	}

	ubh_mark_buffer_dirty (USPI_UBH(uspi));
	ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
	if (sb->s_flags & MS_SYNCHRONOUS)
		ubh_sync_block(UCPI_UBH(ucpi));

	/* continue with the part that spilled into the next cylinder group */
	if (overflow) {
		fragment += count;
		count = overflow;
		goto do_more;
	}

	ufs_mark_sb_dirty(sb);
	mutex_unlock(&UFS_SB(sb)->s_lock);
	UFSD("EXIT\n");
	return;

failed_unlock:
	mutex_unlock(&UFS_SB(sb)->s_lock);
failed:
	UFSD("EXIT (FAILED)\n");
	return;
}
227
/*
 * Relink the page cache after data has moved on disk:
 * buffers that had b_blocknr oldb...oldb+count-1 get
 * b_blocknr newb...newb+count-1.  We also assume that
 * oldb...oldb+count-1 blocks are situated at the end of the file.
 *
 * We can come here from ufs_writepage or ufs_prepare_write;
 * locked_page is an argument of those functions, so it is already locked.
 */
static void ufs_change_blocknr(struct inode *inode, sector_t beg,
			       unsigned int count, sector_t oldb,
			       sector_t newb, struct page *locked_page)
{
	/* number of file blocks held by one page-cache page */
	const unsigned blks_per_page =
		1 << (PAGE_SHIFT - inode->i_blkbits);
	const unsigned mask = blks_per_page - 1;
	struct address_space * const mapping = inode->i_mapping;
	pgoff_t index, cur_index, last_index;
	unsigned pos, j, lblock;
	sector_t end, i;
	struct page *page;
	struct buffer_head *head, *bh;

	UFSD("ENTER, ino %lu, count %u, oldb %llu, newb %llu\n",
	      inode->i_ino, count,
	     (unsigned long long)oldb, (unsigned long long)newb);

	BUG_ON(!locked_page);
	BUG_ON(!PageLocked(locked_page));

	cur_index = locked_page->index;
	end = count + beg;
	last_index = end >> (PAGE_SHIFT - inode->i_blkbits);
	/* one page per iteration: (i | mask) + 1 jumps to the next page */
	for (i = beg; i < end; i = (i | mask) + 1) {
		index = i >> (PAGE_SHIFT - inode->i_blkbits);

		/* locked_page is already locked by the caller; any other
		 * page has to be looked up and locked here */
		if (likely(cur_index != index)) {
			page = ufs_get_locked_page(mapping, index);
			if (!page)/* it was truncated */
				continue;
			if (IS_ERR(page)) {/* or EIO */
				ufs_error(inode->i_sb, __func__,
					  "read of page %llu failed\n",
					  (unsigned long long)index);
				continue;
			}
		} else
			page = locked_page;

		head = page_buffers(page);
		bh = head;
		pos = i & mask;
		/* advance to the first buffer of interest in this page */
		for (j = 0; j < pos; ++j)
			bh = bh->b_this_page;


		/* the last page may be only partially inside the range */
		if (unlikely(index == last_index))
			lblock = end & mask;
		else
			lblock = blks_per_page;

		do {
			if (j >= lblock)
				break;
			pos = (i - beg) + j;	/* offset within the moved run */

			if (!buffer_mapped(bh))
					map_bh(bh, inode->i_sb, oldb + pos);
			/* data must be up to date before it can be redirtied
			 * at the new location */
			if (!buffer_uptodate(bh)) {
				ll_rw_block(REQ_OP_READ, 0, 1, &bh);
				wait_on_buffer(bh);
				if (!buffer_uptodate(bh)) {
					ufs_error(inode->i_sb, __func__,
						  "read of block failed\n");
					break;
				}
			}

			UFSD(" change from %llu to %llu, pos %u\n",
			     (unsigned long long)(pos + oldb),
			     (unsigned long long)(pos + newb), pos);

			bh->b_blocknr = newb + pos;
			/* drop any stale alias of the new location from the
			 * block device's page cache */
			clean_bdev_bh_alias(bh);
			mark_buffer_dirty(bh);
			++j;
			bh = bh->b_this_page;
		} while (bh != head);

		if (likely(cur_index != index))
			ufs_put_locked_page(page);
	}
	UFSD("EXIT\n");
}
323
324 static void ufs_clear_frags(struct inode *inode, sector_t beg, unsigned int n,
325                             int sync)
326 {
327         struct buffer_head *bh;
328         sector_t end = beg + n;
329
330         for (; beg < end; ++beg) {
331                 bh = sb_getblk(inode->i_sb, beg);
332                 lock_buffer(bh);
333                 memset(bh->b_data, 0, inode->i_sb->s_blocksize);
334                 set_buffer_uptodate(bh);
335                 mark_buffer_dirty(bh);
336                 unlock_buffer(bh);
337                 if (IS_SYNC(inode) || sync)
338                         sync_dirty_buffer(bh);
339                 brelse(bh);
340         }
341 }
342
343 u64 ufs_new_fragments(struct inode *inode, void *p, u64 fragment,
344                            u64 goal, unsigned count, int *err,
345                            struct page *locked_page)
346 {
347         struct super_block * sb;
348         struct ufs_sb_private_info * uspi;
349         struct ufs_super_block_first * usb1;
350         unsigned cgno, oldcount, newcount;
351         u64 tmp, request, result;
352         
353         UFSD("ENTER, ino %lu, fragment %llu, goal %llu, count %u\n",
354              inode->i_ino, (unsigned long long)fragment,
355              (unsigned long long)goal, count);
356         
357         sb = inode->i_sb;
358         uspi = UFS_SB(sb)->s_uspi;
359         usb1 = ubh_get_usb_first(uspi);
360         *err = -ENOSPC;
361
362         mutex_lock(&UFS_SB(sb)->s_lock);
363         tmp = ufs_data_ptr_to_cpu(sb, p);
364
365         if (count + ufs_fragnum(fragment) > uspi->s_fpb) {
366                 ufs_warning(sb, "ufs_new_fragments", "internal warning"
367                             " fragment %llu, count %u",
368                             (unsigned long long)fragment, count);
369                 count = uspi->s_fpb - ufs_fragnum(fragment); 
370         }
371         oldcount = ufs_fragnum (fragment);
372         newcount = oldcount + count;
373
374         /*
375          * Somebody else has just allocated our fragments
376          */
377         if (oldcount) {
378                 if (!tmp) {
379                         ufs_error(sb, "ufs_new_fragments", "internal error, "
380                                   "fragment %llu, tmp %llu\n",
381                                   (unsigned long long)fragment,
382                                   (unsigned long long)tmp);
383                         mutex_unlock(&UFS_SB(sb)->s_lock);
384                         return INVBLOCK;
385                 }
386                 if (fragment < UFS_I(inode)->i_lastfrag) {
387                         UFSD("EXIT (ALREADY ALLOCATED)\n");
388                         mutex_unlock(&UFS_SB(sb)->s_lock);
389                         return 0;
390                 }
391         }
392         else {
393                 if (tmp) {
394                         UFSD("EXIT (ALREADY ALLOCATED)\n");
395                         mutex_unlock(&UFS_SB(sb)->s_lock);
396                         return 0;
397                 }
398         }
399
400         /*
401          * There is not enough space for user on the device
402          */
403         if (unlikely(ufs_freespace(uspi, uspi->s_minfree) <= 0)) {
404                 if (!capable(CAP_SYS_RESOURCE)) {
405                         mutex_unlock(&UFS_SB(sb)->s_lock);
406                         UFSD("EXIT (FAILED)\n");
407                         return 0;
408                 }
409         }
410
411         if (goal >= uspi->s_size) 
412                 goal = 0;
413         if (goal == 0) 
414                 cgno = ufs_inotocg (inode->i_ino);
415         else
416                 cgno = ufs_dtog(uspi, goal);
417          
418         /*
419          * allocate new fragment
420          */
421         if (oldcount == 0) {
422                 result = ufs_alloc_fragments (inode, cgno, goal, count, err);
423                 if (result) {
424                         ufs_clear_frags(inode, result + oldcount,
425                                         newcount - oldcount, locked_page != NULL);
426                         write_seqlock(&UFS_I(inode)->meta_lock);
427                         ufs_cpu_to_data_ptr(sb, p, result);
428                         write_sequnlock(&UFS_I(inode)->meta_lock);
429                         *err = 0;
430                         UFS_I(inode)->i_lastfrag =
431                                 max(UFS_I(inode)->i_lastfrag, fragment + count);
432                 }
433                 mutex_unlock(&UFS_SB(sb)->s_lock);
434                 UFSD("EXIT, result %llu\n", (unsigned long long)result);
435                 return result;
436         }
437
438         /*
439          * resize block
440          */
441         result = ufs_add_fragments(inode, tmp, oldcount, newcount);
442         if (result) {
443                 *err = 0;
444                 UFS_I(inode)->i_lastfrag = max(UFS_I(inode)->i_lastfrag,
445                                                 fragment + count);
446                 ufs_clear_frags(inode, result + oldcount, newcount - oldcount,
447                                 locked_page != NULL);
448                 mutex_unlock(&UFS_SB(sb)->s_lock);
449                 UFSD("EXIT, result %llu\n", (unsigned long long)result);
450                 return result;
451         }
452
453         /*
454          * allocate new block and move data
455          */
456         switch (fs32_to_cpu(sb, usb1->fs_optim)) {
457             case UFS_OPTSPACE:
458                 request = newcount;
459                 if (uspi->s_minfree < 5 || uspi->cs_total.cs_nffree
460                     > uspi->s_dsize * uspi->s_minfree / (2 * 100))
461                         break;
462                 usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME);
463                 break;
464             default:
465                 usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME);
466         
467             case UFS_OPTTIME:
468                 request = uspi->s_fpb;
469                 if (uspi->cs_total.cs_nffree < uspi->s_dsize *
470                     (uspi->s_minfree - 2) / 100)
471                         break;
472                 usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME);
473                 break;
474         }
475         result = ufs_alloc_fragments (inode, cgno, goal, request, err);
476         if (result) {
477                 ufs_clear_frags(inode, result + oldcount, newcount - oldcount,
478                                 locked_page != NULL);
479                 ufs_change_blocknr(inode, fragment - oldcount, oldcount,
480                                    uspi->s_sbbase + tmp,
481                                    uspi->s_sbbase + result, locked_page);
482                 write_seqlock(&UFS_I(inode)->meta_lock);
483                 ufs_cpu_to_data_ptr(sb, p, result);
484                 write_sequnlock(&UFS_I(inode)->meta_lock);
485                 *err = 0;
486                 UFS_I(inode)->i_lastfrag = max(UFS_I(inode)->i_lastfrag,
487                                                 fragment + count);
488                 mutex_unlock(&UFS_SB(sb)->s_lock);
489                 if (newcount < request)
490                         ufs_free_fragments (inode, result + newcount, request - newcount);
491                 ufs_free_fragments (inode, tmp, oldcount);
492                 UFSD("EXIT, result %llu\n", (unsigned long long)result);
493                 return result;
494         }
495
496         mutex_unlock(&UFS_SB(sb)->s_lock);
497         UFSD("EXIT (FAILED)\n");
498         return 0;
499 }               
500
501 static bool try_add_frags(struct inode *inode, unsigned frags)
502 {
503         unsigned size = frags * i_blocksize(inode);
504         spin_lock(&inode->i_lock);
505         __inode_add_bytes(inode, size);
506         if (unlikely((u32)inode->i_blocks != inode->i_blocks)) {
507                 __inode_sub_bytes(inode, size);
508                 spin_unlock(&inode->i_lock);
509                 return false;
510         }
511         spin_unlock(&inode->i_lock);
512         return true;
513 }
514
/*
 * Try to grow an existing fragment run in place from 'oldcount' to
 * 'newcount' fragments.  Returns 'fragment' on success, or 0 if the
 * following fragments in the block are not free (the caller then
 * allocates elsewhere and moves the data).
 * Called with UFS_SB(sb)->s_lock held (see ufs_new_fragments()).
 */
static u64 ufs_add_fragments(struct inode *inode, u64 fragment,
			     unsigned oldcount, unsigned newcount)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_cg_private_info * ucpi;
	struct ufs_cylinder_group * ucg;
	unsigned cgno, fragno, fragoff, count, fragsize, i;
	
	UFSD("ENTER, fragment %llu, oldcount %u, newcount %u\n",
	     (unsigned long long)fragment, oldcount, newcount);
	
	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;
	count = newcount - oldcount;	/* fragments to add */
	
	cgno = ufs_dtog(uspi, fragment);
	/* quick rejects: not enough free fragments in this cylinder group,
	 * or the extended run would cross a block boundary */
	if (fs32_to_cpu(sb, UFS_SB(sb)->fs_cs(cgno).cs_nffree) < count)
		return 0;
	if ((ufs_fragnum (fragment) + newcount) > uspi->s_fpb)
		return 0;
	ucpi = ufs_load_cylinder (sb, cgno);
	if (!ucpi)
		return 0;
	ucg = ubh_get_ucg (UCPI_UBH(ucpi));
	if (!ufs_cg_chkmagic(sb, ucg)) {
		ufs_panic (sb, "ufs_add_fragments",
			"internal error, bad magic number on cg %u", cgno);
		return 0;
	}

	fragno = ufs_dtogd(uspi, fragment);
	fragoff = ufs_fragnum (fragno);
	/* every fragment we want to grow into must currently be free
	 * (a clear bit means "in use") */
	for (i = oldcount; i < newcount; i++)
		if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i))
			return 0;

	/* charge the new fragments against i_blocks; may refuse on overflow */
	if (!try_add_frags(inode, count))
		return 0;
	/*
	 * Block can be extended
	 */
	ucg->cg_time = cpu_to_fs32(sb, get_seconds());
	/* measure the free run that follows the old tail... */
	for (i = newcount; i < (uspi->s_fpb - fragoff); i++)
		if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i))
			break;
	fragsize = i - oldcount;
	if (!fs32_to_cpu(sb, ucg->cg_frsum[fragsize]))
		ufs_panic (sb, "ufs_add_fragments",
			"internal error or corrupted bitmap on cg %u", cgno);
	/* ...and move it from frsum[fragsize] to frsum[fragsize - count] */
	fs32_sub(sb, &ucg->cg_frsum[fragsize], 1);
	if (fragsize != count)
		fs32_add(sb, &ucg->cg_frsum[fragsize - count], 1);
	/* mark the newly consumed fragments as allocated */
	for (i = oldcount; i < newcount; i++)
		ubh_clrbit (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i);

	fs32_sub(sb, &ucg->cg_cs.cs_nffree, count);
	fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
	uspi->cs_total.cs_nffree -= count;
	
	ubh_mark_buffer_dirty (USPI_UBH(uspi));
	ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
	if (sb->s_flags & MS_SYNCHRONOUS)
		ubh_sync_block(UCPI_UBH(ucpi));
	ufs_mark_sb_dirty(sb);

	UFSD("EXIT, fragment %llu\n", (unsigned long long)fragment);
	
	return fragment;
}
585
/*
 * Check whether cylinder group 'cgno' can satisfy the allocation:
 * either a whole free block, or a free fragment run of at least
 * 'count' fragments (per the cg_frsum histogram).  Jumps to the
 * enclosing function's cg_found label on success; relies on sb,
 * uspi, ucg, cgno, count and k being in scope at the expansion site.
 */
#define UFS_TEST_FREE_SPACE_CG \
	ucg = (struct ufs_cylinder_group *) UFS_SB(sb)->s_ucg[cgno]->b_data; \
	if (fs32_to_cpu(sb, ucg->cg_cs.cs_nbfree)) \
		goto cg_found; \
	for (k = count; k < uspi->s_fpb; k++) \
		if (fs32_to_cpu(sb, ucg->cg_frsum[k])) \
			goto cg_found; 
593
/*
 * Allocate 'count' fragments, preferring cylinder group 'cgno' near
 * fragment 'goal'.  Classic FFS search order:
 *   1. the preferred cylinder group,
 *   2. quadratic rehash across the groups,
 *   3. linear brute-force scan.
 * Returns the absolute first fragment of the allocated run, or 0 on
 * failure.  Called with UFS_SB(sb)->s_lock held (see ufs_new_fragments()).
 */
static u64 ufs_alloc_fragments(struct inode *inode, unsigned cgno,
			       u64 goal, unsigned count, int *err)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_cg_private_info * ucpi;
	struct ufs_cylinder_group * ucg;
	unsigned oldcg, i, j, k, allocsize;
	u64 result;
	
	UFSD("ENTER, ino %lu, cgno %u, goal %llu, count %u\n",
	     inode->i_ino, cgno, (unsigned long long)goal, count);

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;
	oldcg = cgno;
	
	/*
	 * 1. searching on preferred cylinder group
	 */
	UFS_TEST_FREE_SPACE_CG

	/*
	 * 2. quadratic rehash
	 */
	for (j = 1; j < uspi->s_ncg; j *= 2) {
		cgno += j;
		if (cgno >= uspi->s_ncg) 
			cgno -= uspi->s_ncg;
		UFS_TEST_FREE_SPACE_CG
	}

	/*
	 * 3. brute force search
	 * We start at i = 2 ( 0 is checked at 1.step, 1 at 2.step )
	 */
	cgno = (oldcg + 1) % uspi->s_ncg;
	for (j = 2; j < uspi->s_ncg; j++) {
		cgno++;
		if (cgno >= uspi->s_ncg)
			cgno = 0;
		UFS_TEST_FREE_SPACE_CG
	}
	
	UFSD("EXIT (FAILED)\n");
	return 0;

cg_found:
	ucpi = ufs_load_cylinder (sb, cgno);
	if (!ucpi)
		return 0;
	ucg = ubh_get_ucg (UCPI_UBH(ucpi));
	if (!ufs_cg_chkmagic(sb, ucg)) 
		ufs_panic (sb, "ufs_alloc_fragments",
			"internal error, bad magic number on cg %u", cgno);
	ucg->cg_time = cpu_to_fs32(sb, get_seconds());

	/* a whole block was asked for: take the block allocation path */
	if (count == uspi->s_fpb) {
		result = ufs_alloccg_block (inode, ucpi, goal, err);
		if (result == INVBLOCK)
			return 0;
		goto succed;
	}

	/* find the smallest free-run size >= count that exists in this cg */
	for (allocsize = count; allocsize < uspi->s_fpb; allocsize++)
		if (fs32_to_cpu(sb, ucg->cg_frsum[allocsize]) != 0)
			break;
	
	/* no suitable run: break up a whole block and free its tail */
	if (allocsize == uspi->s_fpb) {
		result = ufs_alloccg_block (inode, ucpi, goal, err);
		if (result == INVBLOCK)
			return 0;
		goal = ufs_dtogd(uspi, result);
		for (i = count; i < uspi->s_fpb; i++)
			ubh_setbit (UCPI_UBH(ucpi), ucpi->c_freeoff, goal + i);
		i = uspi->s_fpb - count;	/* fragments given back */

		inode_sub_bytes(inode, i << uspi->s_fshift);
		fs32_add(sb, &ucg->cg_cs.cs_nffree, i);
		uspi->cs_total.cs_nffree += i;
		fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, i);
		fs32_add(sb, &ucg->cg_frsum[i], 1);
		goto succed;
	}

	/* carve 'count' fragments out of a free run of 'allocsize' */
	result = ufs_bitmap_search (sb, ucpi, goal, allocsize);
	if (result == INVBLOCK)
		return 0;
	if (!try_add_frags(inode, count))
		return 0;
	for (i = 0; i < count; i++)
		ubh_clrbit (UCPI_UBH(ucpi), ucpi->c_freeoff, result + i);
	
	fs32_sub(sb, &ucg->cg_cs.cs_nffree, count);
	uspi->cs_total.cs_nffree -= count;
	fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
	fs32_sub(sb, &ucg->cg_frsum[allocsize], 1);

	/* the leftover of the run becomes a shorter free run */
	if (count != allocsize)
		fs32_add(sb, &ucg->cg_frsum[allocsize - count], 1);

succed:	/* sic - longstanding label name */
	ubh_mark_buffer_dirty (USPI_UBH(uspi));
	ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
	if (sb->s_flags & MS_SYNCHRONOUS)
		ubh_sync_block(UCPI_UBH(ucpi));
	ufs_mark_sb_dirty(sb);

	/* convert the cg-relative fragment number to an absolute one */
	result += cgno * uspi->s_fpg;
	UFSD("EXIT3, result %llu\n", (unsigned long long)result);
	return result;
}
706
/*
 * Allocate one whole block (s_fpb fragments) within cylinder group
 * @ucpi, as close to fragment @goal as possible (goal == 0 means
 * "continue from the group's rotor").  Returns the cg-relative first
 * fragment of the block, INVBLOCK when the bitmap search fails, or 0
 * when the inode's i_blocks counter would overflow.
 */
static u64 ufs_alloccg_block(struct inode *inode,
			     struct ufs_cg_private_info *ucpi,
			     u64 goal, int *err)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_cylinder_group * ucg;
	u64 result, blkno;

	UFSD("ENTER, goal %llu\n", (unsigned long long)goal);

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;
	ucg = ubh_get_ucg(UCPI_UBH(ucpi));

	if (goal == 0) {
		goal = ucpi->c_rotor;	/* resume after the last allocation */
		goto norot;
	}
	goal = ufs_blknum (goal);	/* round down to a block boundary */
	goal = ufs_dtogd(uspi, goal);	/* make it cg-relative */
	
	/*
	 * If the requested block is available, use it.
	 */
	if (ubh_isblockset(UCPI_UBH(ucpi), ucpi->c_freeoff, ufs_fragstoblks(goal))) {
		result = goal;
		goto gotit;
	}
	
norot:	
	result = ufs_bitmap_search (sb, ucpi, goal, uspi->s_fpb);
	if (result == INVBLOCK)
		return INVBLOCK;
	ucpi->c_rotor = result;
gotit:
	/* charge a full block against i_blocks; refuse on 32-bit overflow */
	if (!try_add_frags(inode, uspi->s_fpb))
		return 0;
	blkno = ufs_fragstoblks(result);
	ubh_clrblock (UCPI_UBH(ucpi), ucpi->c_freeoff, blkno);
	if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
		ufs_clusteracct (sb, ucpi, blkno, -1);

	fs32_sub(sb, &ucg->cg_cs.cs_nbfree, 1);
	uspi->cs_total.cs_nbfree--;
	fs32_sub(sb, &UFS_SB(sb)->fs_cs(ucpi->c_cgx).cs_nbfree, 1);

	if (uspi->fs_magic != UFS2_MAGIC) {
		/* UFS1 also keeps per-cylinder rotational statistics */
		unsigned cylno = ufs_cbtocylno((unsigned)result);

		fs16_sub(sb, &ubh_cg_blks(ucpi, cylno,
					  ufs_cbtorpos((unsigned)result)), 1);
		fs32_sub(sb, &ubh_cg_blktot(ucpi, cylno), 1);
	}
	
	UFSD("EXIT, result %llu\n", (unsigned long long)result);

	return result;
}
766
/*
 * Scan 'size' bytes of a bitmap held in 'ubh', starting 'begin' bytes
 * in, for a byte whose 'table' entry has any bit of 'mask' set.  The
 * bitmap may span several buffer heads of uspi->s_fsize bytes each.
 *
 * Returns size + rest: the number of bytes still unscanned when a
 * matching byte was found (counting the match itself), or 0 if no
 * byte matched.  The caller derives the match position from this
 * (see ufs_bitmap_search()).
 */
static unsigned ubh_scanc(struct ufs_sb_private_info *uspi,
			  struct ufs_buffer_head *ubh,
			  unsigned begin, unsigned size,
			  unsigned char *table, unsigned char mask)
{
	unsigned rest, offset;
	unsigned char *cp;
	

	offset = begin & ~uspi->s_fmask;	/* offset inside the first buffer */
	begin >>= uspi->s_fshift;		/* index of the first buffer */
	for (;;) {
		/* how many bytes to examine in this buffer */
		if ((offset + size) < uspi->s_fsize)
			rest = size;
		else
			rest = uspi->s_fsize - offset;
		size -= rest;
		cp = ubh->bh[begin]->b_data + offset;
		/* stop on a match (rest stays > 0) or when rest runs out */
		while ((table[*cp++] & mask) == 0 && --rest)
			;
		if (rest || !size)
			break;
		begin++;
		offset = 0;
	}
	return (size + rest);
}
794
/*
 * Find a run of free fragments of the specified size in the specified
 * cylinder group.
 * @sb: pointer to super block
 * @ucpi: pointer to cylinder group info
 * @goal: block near which we want to find the new one
 * @count: size of the run wanted, in fragments
 *
 * Returns the first fragment of the free run, or INVBLOCK on failure.
 */
static u64 ufs_bitmap_search(struct super_block *sb,
                             struct ufs_cg_private_info *ucpi,
                             u64 goal, unsigned count)
{
        /*
         * Bit patterns for identifying fragments in the block map
         * used as ((map & mask_arr) == want_arr)
         */
        static const int mask_arr[9] = {
                0x3, 0x7, 0xf, 0x1f, 0x3f, 0x7f, 0xff, 0x1ff, 0x3ff
        };
        static const int want_arr[9] = {
                0x0, 0x2, 0x6, 0xe, 0x1e, 0x3e, 0x7e, 0xfe, 0x1fe
        };
        struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
        unsigned start, length, loc;
        unsigned pos, want, blockmap, mask, end;
        u64 result;

        UFSD("ENTER, cg %u, goal %llu, count %u\n", ucpi->c_cgx,
             (unsigned long long)goal, count);

        /* Bitmap byte to start scanning at: near the goal if one was
         * given, otherwise at this cylinder group's rotor. */
        if (goal)
                start = ufs_dtogd(uspi, goal) >> 3;
        else
                start = ucpi->c_frotor >> 3;
                
        /* First pass: scan from 'start' to the end of the bitmap for a
         * byte whose fragtable entry shows a long-enough free run. */
        length = ((uspi->s_fpg + 7) >> 3) - start;
        loc = ubh_scanc(uspi, UCPI_UBH(ucpi), ucpi->c_freeoff + start, length,
                (uspi->s_fpb == 8) ? ufs_fragtable_8fpb : ufs_fragtable_other,
                1 << (count - 1 + (uspi->s_fpb & 7))); 
        if (loc == 0) {
                /* Second pass: wrap around and scan from the beginning
                 * of the bitmap up to and including the start byte. */
                length = start + 1;
                loc = ubh_scanc(uspi, UCPI_UBH(ucpi), ucpi->c_freeoff, length,
                                (uspi->s_fpb == 8) ? ufs_fragtable_8fpb :
                                ufs_fragtable_other,
                                1 << (count - 1 + (uspi->s_fpb & 7)));
                if (loc == 0) {
                        /* Both passes failed although the caller expected
                         * room here: the bitmap must be corrupted. */
                        ufs_error(sb, "ufs_bitmap_search",
                                  "bitmap corrupted on cg %u, start %u,"
                                  " length %u, count %u, freeoff %u\n",
                                  ucpi->c_cgx, start, length, count,
                                  ucpi->c_freeoff);
                        return INVBLOCK;
                }
                start = 0;
        }
        /* ubh_scanc() returned the byte count from the match to the end
         * of the scanned region, so (start + length - loc) is the index
         * of the matching byte; << 3 converts it to a fragment number. */
        result = (start + length - loc) << 3;
        ucpi->c_frotor = result;

        /*
         * found the byte in the map
         */

        /* Locate the exact run inside the matched byte: test each
         * position for 'count' consecutive free (set) bits. */
        for (end = result + 8; result < end; result += uspi->s_fpb) {
                blockmap = ubh_blkmap(UCPI_UBH(ucpi), ucpi->c_freeoff, result);
                /* Shift in a zero guard bit below the map so the want
                 * pattern (a free run preceded by an allocated fragment)
                 * can also match at position 0. */
                blockmap <<= 1;
                mask = mask_arr[count];
                want = want_arr[count];
                for (pos = 0; pos <= uspi->s_fpb - count; pos++) {
                        if ((blockmap & mask) == want) {
                                UFSD("EXIT, result %llu\n",
                                     (unsigned long long)result);
                                return result + pos;
                        }
                        mask <<= 1;
                        want <<= 1;
                }
        }

        /* The fragtable promised a run in this byte but none was found. */
        ufs_error(sb, "ufs_bitmap_search", "block not in map on cg %u\n",
                  ucpi->c_cgx);
        UFSD("EXIT (FAILED)\n");
        return INVBLOCK;
}
877
/*
 * Keep a cylinder group's cluster map and cluster summary counters in
 * sync when one block changes state.
 *
 * @blkno: block number (within the cylinder group) that changed state
 * @cnt:   +1 when the block becomes free, -1 when it is allocated;
 *         also the delta applied to the summary counters
 *
 * No-op when the filesystem does not maintain cluster summaries
 * (s_contigsumsize <= 0).
 */
static void ufs_clusteracct(struct super_block * sb,
        struct ufs_cg_private_info * ucpi, unsigned blkno, int cnt)
{
        struct ufs_sb_private_info * uspi;
        int i, start, end, forw, back;
        
        uspi = UFS_SB(sb)->s_uspi;
        if (uspi->s_contigsumsize <= 0)
                return;

        /* In the cluster map a set bit means "free". */
        if (cnt > 0)
                ubh_setbit(UCPI_UBH(ucpi), ucpi->c_clusteroff, blkno);
        else
                ubh_clrbit(UCPI_UBH(ucpi), ucpi->c_clusteroff, blkno);

        /*
         * Find the size of the cluster going forward.
         */
        start = blkno + 1;
        end = start + uspi->s_contigsumsize;
        if ( end >= ucpi->c_nclusterblks)
                end = ucpi->c_nclusterblks;
        i = ubh_find_next_zero_bit (UCPI_UBH(ucpi), ucpi->c_clusteroff, end, start);
        if (i > end)
                i = end;        /* no allocated block found in range */
        forw = i - start;
        
        /*
         * Find the size of the cluster going backward.
         */
        start = blkno - 1;
        end = start - uspi->s_contigsumsize;
        if (end < 0 ) 
                end = -1;
        i = ubh_find_last_zero_bit (UCPI_UBH(ucpi), ucpi->c_clusteroff, start, end);
        if ( i < end) 
                i = end;
        back = start - i;
        
        /*
         * Account for old cluster and the possibly new forward and
         * back clusters.
         */
        i = back + forw + 1;    /* size of the run containing blkno */
        if (i > uspi->s_contigsumsize)
                i = uspi->s_contigsumsize;      /* oversize runs share the last bucket */
        fs32_add(sb, (__fs32*)ubh_get_addr(UCPI_UBH(ucpi), ucpi->c_clustersumoff + (i << 2)), cnt);
        /* The forward and backward runs are adjusted with the opposite
         * sign: on free they merge into the new larger cluster, on
         * allocation they become standalone clusters again. */
        if (back > 0)
                fs32_sub(sb, (__fs32*)ubh_get_addr(UCPI_UBH(ucpi), ucpi->c_clustersumoff + (back << 2)), cnt);
        if (forw > 0)
                fs32_sub(sb, (__fs32*)ubh_get_addr(UCPI_UBH(ucpi), ucpi->c_clustersumoff + (forw << 2)), cnt);
}
930
931
/*
 * Free-fragment run table for filesystems with 8 fragments per block.
 * Indexed by a raw bitmap byte (set bit = free fragment); bit (k-1) of
 * an entry is set when the byte contains a maximal run of exactly k
 * consecutive free fragments — e.g. table[0xff] = 0x80 (one run of 8),
 * table[0x05] = 0x01 (two separate runs of 1).  ubh_scanc() tests the
 * entries against 1 << (count - 1) to find a byte holding a run of
 * 'count' fragments.
 */
static unsigned char ufs_fragtable_8fpb[] = {
        0x00, 0x01, 0x01, 0x02, 0x01, 0x01, 0x02, 0x04, 0x01, 0x01, 0x01, 0x03, 0x02, 0x03, 0x04, 0x08,
        0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x02, 0x03, 0x03, 0x02, 0x04, 0x05, 0x08, 0x10,
        0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
        0x02, 0x03, 0x03, 0x02, 0x03, 0x03, 0x02, 0x06, 0x04, 0x05, 0x05, 0x06, 0x08, 0x09, 0x10, 0x20,
        0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09, 
        0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x03, 0x03, 0x03, 0x03, 0x05, 0x05, 0x09, 0x11,
        0x02, 0x03, 0x03, 0x02, 0x03, 0x03, 0x02, 0x06, 0x03, 0x03, 0x03, 0x03, 0x02, 0x03, 0x06, 0x0A,
        0x04, 0x05, 0x05, 0x06, 0x05, 0x05, 0x06, 0x04, 0x08, 0x09, 0x09, 0x0A, 0x10, 0x11, 0x20, 0x40,
        0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
        0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x03, 0x03, 0x03, 0x03, 0x05, 0x05, 0x09, 0x11,
        0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
        0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x07, 0x05, 0x05, 0x05, 0x07, 0x09, 0x09, 0x11, 0x21,
        0x02, 0x03, 0x03, 0x02, 0x03, 0x03, 0x02, 0x06, 0x03, 0x03, 0x03, 0x03, 0x02, 0x03, 0x06, 0x0A,
        0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x07, 0x02, 0x03, 0x03, 0x02, 0x06, 0x07, 0x0A, 0x12,
        0x04, 0x05, 0x05, 0x06, 0x05, 0x05, 0x06, 0x04, 0x05, 0x05, 0x05, 0x07, 0x06, 0x07, 0x04, 0x0C,
        0x08, 0x09, 0x09, 0x0A, 0x09, 0x09, 0x0A, 0x0C, 0x10, 0x11, 0x11, 0x12, 0x20, 0x21, 0x40, 0x80,
};
950
/*
 * Free-fragment run table for filesystems with fewer than 8 fragments
 * per block (1, 2 or 4).  Indexed by a raw bitmap byte; each entry
 * packs run information for the different fragments-per-block sizes at
 * different bit positions, selected in ubh_scanc() by the shift
 * (count - 1 + (s_fpb & 7)).
 * NOTE(review): exact per-size bit layout inferred from the ubh_scanc()
 * mask computation — verify against the BSD FFS origin before relying
 * on it.
 */
static unsigned char ufs_fragtable_other[] = {
        0x00, 0x16, 0x16, 0x2A, 0x16, 0x16, 0x26, 0x4E, 0x16, 0x16, 0x16, 0x3E, 0x2A, 0x3E, 0x4E, 0x8A,
        0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
        0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
        0x2A, 0x3E, 0x3E, 0x2A, 0x3E, 0x3E, 0x2E, 0x6E, 0x3E, 0x3E, 0x3E, 0x3E, 0x2A, 0x3E, 0x6E, 0xAA,
        0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
        0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
        0x26, 0x36, 0x36, 0x2E, 0x36, 0x36, 0x26, 0x6E, 0x36, 0x36, 0x36, 0x3E, 0x2E, 0x3E, 0x6E, 0xAE,
        0x4E, 0x5E, 0x5E, 0x6E, 0x5E, 0x5E, 0x6E, 0x4E, 0x5E, 0x5E, 0x5E, 0x7E, 0x6E, 0x7E, 0x4E, 0xCE,
        0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
        0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
        0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
        0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0xBE,
        0x2A, 0x3E, 0x3E, 0x2A, 0x3E, 0x3E, 0x2E, 0x6E, 0x3E, 0x3E, 0x3E, 0x3E, 0x2A, 0x3E, 0x6E, 0xAA,
        0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0xBE,
        0x4E, 0x5E, 0x5E, 0x6E, 0x5E, 0x5E, 0x6E, 0x4E, 0x5E, 0x5E, 0x5E, 0x7E, 0x6E, 0x7E, 0x4E, 0xCE,
        0x8A, 0x9E, 0x9E, 0xAA, 0x9E, 0x9E, 0xAE, 0xCE, 0x9E, 0x9E, 0x9E, 0xBE, 0xAA, 0xBE, 0xCE, 0x8A,
};