/*
 * linux/fs/ufs/balloc.c
 *
 * Daniel Pirkl <daniel.pirkl@email.cz>
 * Charles University, Faculty of Mathematics and Physics
 */
10 #include <linux/ufs_fs.h>
11 #include <linux/stat.h>
12 #include <linux/time.h>
13 #include <linux/string.h>
14 #include <linux/quotaops.h>
15 #include <linux/buffer_head.h>
16 #include <linux/capability.h>
17 #include <linux/sched.h>
18 #include <linux/bitops.h>
19 #include <asm/byteorder.h>
/*
 * Forward declarations for the static allocation helpers defined later in
 * this file, plus the two fragment-pattern lookup tables at the bottom.
 */
24 static unsigned ufs_add_fragments (struct inode *, unsigned, unsigned, unsigned, int *);
25 static unsigned ufs_alloc_fragments (struct inode *, unsigned, unsigned, unsigned, int *);
26 static unsigned ufs_alloccg_block (struct inode *, struct ufs_cg_private_info *, unsigned, int *);
27 static unsigned ufs_bitmap_search (struct super_block *, struct ufs_cg_private_info *, unsigned, unsigned);
28 static unsigned char ufs_fragtable_8fpb[], ufs_fragtable_other[];
29 static void ufs_clusteracct(struct super_block *, struct ufs_cg_private_info *, unsigned, int);
/*
 * Free 'count' fragments starting at fragment number 'fragment'.
 *
 * NOTE(review): this chunk is a lossy extraction -- interior lines of the
 * original function (locking, braces, early returns) are missing; only the
 * visible lines are annotated below.
 */
34 void ufs_free_fragments(struct inode *inode, unsigned fragment, unsigned count)
36 struct super_block * sb;
37 struct ufs_sb_private_info * uspi;
38 struct ufs_super_block_first * usb1;
39 struct ufs_cg_private_info * ucpi;
40 struct ufs_cylinder_group * ucg;
41 unsigned cgno, bit, end_bit, bbase, blkmap, i, blkno, cylno;
44 uspi = UFS_SB(sb)->s_uspi;
45 usb1 = ubh_get_usb_first(uspi);
47 UFSD("ENTER, fragment %u, count %u\n", fragment, count);
/*
 * Sanity check on the freed range.  NOTE(review): ufs_fragnum() yields a
 * within-block index, yet it is compared against s_fpg (per-group count);
 * s_fpb would be expected here -- confirm against the upstream source.
 */
49 if (ufs_fragnum(fragment) + count > uspi->s_fpg)
50 ufs_error (sb, "ufs_free_fragments", "internal error");
/* Map the fragment to its cylinder group and its bit offset within it. */
54 cgno = ufs_dtog(fragment);
55 bit = ufs_dtogd(fragment);
56 if (cgno >= uspi->s_ncg) {
57 ufs_panic (sb, "ufs_free_fragments", "freeing blocks are outside device");
/* Load the cylinder group's buffers and verify its magic number. */
61 ucpi = ufs_load_cylinder (sb, cgno);
64 ucg = ubh_get_ucg (UCPI_UBH(ucpi));
65 if (!ufs_cg_chkmagic(sb, ucg)) {
66 ufs_panic (sb, "ufs_free_fragments", "internal error, bad magic number on cg %u", cgno);
/*
 * Retract the old fragment-size accounting for this block (fragacct -1),
 * set the free bit for every fragment in the run, then redo the
 * accounting below (fragacct +1).
 */
70 end_bit = bit + count;
71 bbase = ufs_blknum (bit);
72 blkmap = ubh_blkmap (UCPI_UBH(ucpi), ucpi->c_freeoff, bbase);
73 ufs_fragacct (sb, blkmap, ucg->cg_frsum, -1);
74 for (i = bit; i < end_bit; i++) {
75 if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_freeoff, i))
76 ubh_setbit (UCPI_UBH(ucpi), ucpi->c_freeoff, i);
/* Double-free detection: the bit was already set (fragment already free). */
78 ufs_error (sb, "ufs_free_fragments",
79 "bit already cleared for fragment %u", i);
/* Return the quota and bump free-fragment counters at all three levels
 * (cylinder group, superblock total, in-core per-group summary). */
82 DQUOT_FREE_BLOCK (inode, count);
85 fs32_add(sb, &ucg->cg_cs.cs_nffree, count);
86 fs32_add(sb, &usb1->fs_cstotal.cs_nffree, count);
87 fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
88 blkmap = ubh_blkmap (UCPI_UBH(ucpi), ucpi->c_freeoff, bbase);
89 ufs_fragacct(sb, blkmap, ucg->cg_frsum, 1);
/*
 * Trying to reassemble free fragments into a block: if the whole block is
 * now free, move the accounting from fragments over to whole blocks.
 */
94 blkno = ufs_fragstoblks (bbase);
95 if (ubh_isblockset(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno)) {
96 fs32_sub(sb, &ucg->cg_cs.cs_nffree, uspi->s_fpb);
97 fs32_sub(sb, &usb1->fs_cstotal.cs_nffree, uspi->s_fpb);
98 fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, uspi->s_fpb);
99 if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
100 ufs_clusteracct (sb, ucpi, blkno, 1);
101 fs32_add(sb, &ucg->cg_cs.cs_nbfree, 1);
102 fs32_add(sb, &usb1->fs_cstotal.cs_nbfree, 1);
103 fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nbfree, 1);
/* Per-cylinder rotational-position accounting for the reassembled block. */
104 cylno = ufs_cbtocylno (bbase);
105 fs16_add(sb, &ubh_cg_blks(ucpi, cylno, ufs_cbtorpos(bbase)), 1);
106 fs32_add(sb, &ubh_cg_blktot(ucpi, cylno), 1);
/* Dirty the superblock and cylinder-group buffers; write synchronously
 * when the filesystem is mounted MS_SYNCHRONOUS. */
109 ubh_mark_buffer_dirty (USPI_UBH(uspi));
110 ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
111 if (sb->s_flags & MS_SYNCHRONOUS) {
112 ubh_ll_rw_block (SWRITE, 1, (struct ufs_buffer_head **)&ucpi);
113 ubh_wait_on_buffer (UCPI_UBH(ucpi));
123 UFSD("EXIT (FAILED)\n");
/*
 * Free 'count' fragments from fragment number 'fragment' (free whole
 * blocks): both the start and the length must be block aligned.
 * NOTE(review): lossy extraction -- locking, braces and the overflow
 * retry path are in lines missing from this view.
 */
130 void ufs_free_blocks(struct inode *inode, unsigned fragment, unsigned count)
132 struct super_block * sb;
133 struct ufs_sb_private_info * uspi;
134 struct ufs_super_block_first * usb1;
135 struct ufs_cg_private_info * ucpi;
136 struct ufs_cylinder_group * ucg;
137 unsigned overflow, cgno, bit, end_bit, blkno, i, cylno;
140 uspi = UFS_SB(sb)->s_uspi;
141 usb1 = ubh_get_usb_first(uspi);
143 UFSD("ENTER, fragment %u, count %u\n", fragment, count);
/* Reject runs that are not aligned to whole blocks. */
145 if ((fragment & uspi->s_fpbmask) || (count & uspi->s_fpbmask)) {
146 ufs_error (sb, "ufs_free_blocks", "internal error, "
147 "fragment %u, count %u\n", fragment, count);
155 cgno = ufs_dtog (fragment);
156 bit = ufs_dtogd (fragment);
157 if (cgno >= uspi->s_ncg) {
158 ufs_panic (sb, "ufs_free_blocks", "freeing blocks are outside device");
/* If the run crosses the end of this cylinder group, record the overflow;
 * presumably the tail is freed by a second pass in lines not shown here --
 * confirm against the upstream source. */
161 end_bit = bit + count;
162 if (end_bit > uspi->s_fpg) {
163 overflow = bit + count - uspi->s_fpg;
168 ucpi = ufs_load_cylinder (sb, cgno);
171 ucg = ubh_get_ucg (UCPI_UBH(ucpi));
172 if (!ufs_cg_chkmagic(sb, ucg)) {
173 ufs_panic (sb, "ufs_free_blocks", "internal error, bad magic number on cg %u", cgno);
/* Free one whole block per iteration and update every counter level. */
177 for (i = bit; i < end_bit; i += uspi->s_fpb) {
178 blkno = ufs_fragstoblks(i);
/* Double-free detection: the block is already marked free in the map. */
179 if (ubh_isblockset(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno)) {
180 ufs_error(sb, "ufs_free_blocks", "freeing free fragment");
182 ubh_setblock(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno);
183 if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
184 ufs_clusteracct (sb, ucpi, blkno, 1);
185 DQUOT_FREE_BLOCK(inode, uspi->s_fpb);
187 fs32_add(sb, &ucg->cg_cs.cs_nbfree, 1);
188 fs32_add(sb, &usb1->fs_cstotal.cs_nbfree, 1);
189 fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nbfree, 1);
190 cylno = ufs_cbtocylno(i);
191 fs16_add(sb, &ubh_cg_blks(ucpi, cylno, ufs_cbtorpos(i)), 1);
192 fs32_add(sb, &ubh_cg_blktot(ucpi, cylno), 1);
/* Write back; synchronously if mounted MS_SYNCHRONOUS. */
195 ubh_mark_buffer_dirty (USPI_UBH(uspi));
196 ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
197 if (sb->s_flags & MS_SYNCHRONOUS) {
198 ubh_ll_rw_block (SWRITE, 1, (struct ufs_buffer_head **)&ucpi);
199 ubh_wait_on_buffer (UCPI_UBH(ucpi));
216 UFSD("EXIT (FAILED)\n");
/*
 * Return a locked, up-to-date page of @mapping for use by
 * ufs_change_blocknr(), or an ERR_PTR on read failure.
 * NOTE(review): the second half of the signature, the 'try_again' label
 * and the unlock/return lines are missing from this extraction.
 */
220 static struct page *ufs_get_locked_page(struct address_space *mapping,
/* Fast path: page already cached -- find and lock it. */
226 page = find_lock_page(mapping, index);
/* Slow path: read the page in through the mapping's readpage op. */
228 page = read_cache_page(mapping, index,
229 (filler_t*)mapping->a_ops->readpage,
232 printk(KERN_ERR "ufs_change_blocknr: "
233 "read_cache_page error: ino %lu, index: %lu\n",
234 mapping->host->i_ino, index);
/* A failed read leaves the page !Uptodate or with PG_error set: give up. */
240 if (!PageUptodate(page) || PageError(page)) {
242 page_cache_release(page);
244 printk(KERN_ERR "ufs_change_blocknr: "
245 "can not read page: ino %lu, index: %lu\n",
246 mapping->host->i_ino, index);
248 page = ERR_PTR(-EIO);
/* The page may have been truncated or lost its buffers meanwhile; the
 * caller needs buffers attached, so retry from the top. */
253 if (unlikely(!page->mapping || !page_has_buffers(page))) {
255 page_cache_release(page);
256 goto try_again;/*we really need these buffers*/
/*
 * Modify the inode page cache in such a way that:
 *   have - buffers with b_blocknr equal to oldb...oldb+count-1
 *   get  - buffers with b_blocknr equal to newb...newb+count-1
 * We also suppose that the oldb...oldb+count-1 blocks are
 * situated at the end of the file.
 *
 * We can come here from ufs_writepage or ufs_prepare_write;
 * locked_page is an argument of those functions, so it is already locked.
 */
272 static void ufs_change_blocknr(struct inode *inode, unsigned int count,
273 unsigned int oldb, unsigned int newb,
274 struct page *locked_page)
276 unsigned int blk_per_page = 1 << (PAGE_CACHE_SHIFT - inode->i_blkbits);
278 struct address_space *mapping = inode->i_mapping;
279 pgoff_t index, cur_index = locked_page->index;
282 struct buffer_head *head, *bh;
/* First file-relative block being renumbered (the file's last 'count'). */
284 baseblk = ((i_size_read(inode) - 1) >> inode->i_blkbits) + 1 - count;
286 UFSD("ENTER, ino %lu, count %u, oldb %u, newb %u\n",
287 inode->i_ino, count, oldb, newb);
289 BUG_ON(!PageLocked(locked_page));
/* Walk the affected pages; locked_page itself must not be locked again. */
291 for (i = 0; i < count; i += blk_per_page) {
292 index = (baseblk+i) >> (PAGE_CACHE_SHIFT - inode->i_blkbits);
294 if (likely(cur_index != index)) {
295 page = ufs_get_locked_page(mapping, index);
/* Renumber each buffer still pointing at the old on-disk location. */
302 head = page_buffers(page);
305 if (likely(bh->b_blocknr == j + oldb && j < count)) {
306 unmap_underlying_metadata(bh->b_bdev,
308 bh->b_blocknr = newb + j++;
309 mark_buffer_dirty(bh);
312 bh = bh->b_this_page;
313 } while (bh != head);
315 set_page_dirty(page);
/* Pages this function locked itself are unlocked and released here. */
317 if (likely(cur_index != index)) {
319 page_cache_release(page);
/*
 * Allocate 'count' fragments for 'fragment', continuing the extent whose
 * first fragment is recorded in *p (0 if none yet).  Visible strategy:
 *   - clamp requests that would cross a block boundary,
 *   - brand-new extent: ufs_alloc_fragments() and store the result in *p,
 *   - grow an existing partial block in place: ufs_add_fragments(),
 *   - otherwise allocate a fresh block, move the old data with
 *     ufs_change_blocknr(), and free the old fragments.
 * Returns the first allocated fragment, with *err set on failure.
 * NOTE(review): lossy extraction -- locking and several branch/return
 * lines are missing from this view.
 */
325 unsigned ufs_new_fragments(struct inode * inode, __fs32 * p, unsigned fragment,
326 unsigned goal, unsigned count, int * err, struct page *locked_page)
328 struct super_block * sb;
329 struct ufs_sb_private_info * uspi;
330 struct ufs_super_block_first * usb1;
331 unsigned cgno, oldcount, newcount, tmp, request, result;
333 UFSD("ENTER, ino %lu, fragment %u, goal %u, count %u\n", inode->i_ino, fragment, goal, count);
336 uspi = UFS_SB(sb)->s_uspi;
337 usb1 = ubh_get_usb_first(uspi);
/* Clamp a request that would cross a block boundary. */
342 tmp = fs32_to_cpu(sb, *p);
343 if (count + ufs_fragnum(fragment) > uspi->s_fpb) {
344 ufs_warning (sb, "ufs_new_fragments", "internal warning"
345 " fragment %u, count %u", fragment, count);
346 count = uspi->s_fpb - ufs_fragnum(fragment);
348 oldcount = ufs_fragnum (fragment);
349 newcount = oldcount + count;
/*
 * Somebody else has just allocated our fragments.
 */
356 ufs_error (sb, "ufs_new_fragments", "internal error, "
357 "fragment %u, tmp %u\n", fragment, tmp);
361 if (fragment < UFS_I(inode)->i_lastfrag) {
362 UFSD("EXIT (ALREADY ALLOCATED)\n");
369 UFSD("EXIT (ALREADY ALLOCATED)\n");
/*
 * There is not enough space for the user on the device -- only a holder
 * of CAP_SYS_RESOURCE may dip below the UFS_MINFREE reserve.
 */
378 if (!capable(CAP_SYS_RESOURCE) && ufs_freespace(usb1, UFS_MINFREE) <= 0) {
380 UFSD("EXIT (FAILED)\n");
/* Pick a starting cylinder group: from the goal, else from the inode. */
384 if (goal >= uspi->s_size)
387 cgno = ufs_inotocg (inode->i_ino);
389 cgno = ufs_dtog (goal);
/*
 * allocate a new fragment run
 */
395 result = ufs_alloc_fragments (inode, cgno, goal, count, err);
397 *p = cpu_to_fs32(sb, result);
399 UFS_I(inode)->i_lastfrag = max_t(u32, UFS_I(inode)->i_lastfrag, fragment + count);
402 UFSD("EXIT, result %u\n", result);
/* Try to resize the existing partial block in place. */
409 result = ufs_add_fragments (inode, tmp, oldcount, newcount, err);
412 UFS_I(inode)->i_lastfrag = max_t(u32, UFS_I(inode)->i_lastfrag, fragment + count);
414 UFSD("EXIT, result %u\n", result);
/*
 * allocate a new block and move the data; fs_optim selects the
 * space-vs-time allocation policy, and may be flipped to UFS_OPTTIME
 * when free space is plentiful (or back when it runs low).
 */
421 switch (fs32_to_cpu(sb, usb1->fs_optim)) {
424 if (uspi->s_minfree < 5 || fs32_to_cpu(sb, usb1->fs_cstotal.cs_nffree)
425 > uspi->s_dsize * uspi->s_minfree / (2 * 100) )
427 usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME);
430 usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME);
433 request = uspi->s_fpb;
434 if (fs32_to_cpu(sb, usb1->fs_cstotal.cs_nffree) < uspi->s_dsize *
435 (uspi->s_minfree - 2) / 100)
437 usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME);
440 result = ufs_alloc_fragments (inode, cgno, goal, request, err);
442 ufs_change_blocknr(inode, oldcount, tmp, result, locked_page);
444 *p = cpu_to_fs32(sb, result);
446 UFS_I(inode)->i_lastfrag = max_t(u32, UFS_I(inode)->i_lastfrag, fragment + count);
/* Give back the unused tail of the request and the old fragments. */
448 if (newcount < request)
449 ufs_free_fragments (inode, result + newcount, request - newcount);
450 ufs_free_fragments (inode, tmp, oldcount);
451 UFSD("EXIT, result %u\n", result);
456 UFSD("EXIT (FAILED)\n");
/*
 * Try to extend an existing fragment run in place, from 'oldcount' to
 * 'newcount' fragments, without moving data.  The fragments immediately
 * following the run must all be free in the cylinder-group bitmap.
 * NOTE(review): the return-type line, locking and several return paths
 * are in lines missing from this extraction.
 */
461 ufs_add_fragments (struct inode * inode, unsigned fragment,
462 unsigned oldcount, unsigned newcount, int * err)
464 struct super_block * sb;
465 struct ufs_sb_private_info * uspi;
466 struct ufs_super_block_first * usb1;
467 struct ufs_cg_private_info * ucpi;
468 struct ufs_cylinder_group * ucg;
469 unsigned cgno, fragno, fragoff, count, fragsize, i;
471 UFSD("ENTER, fragment %u, oldcount %u, newcount %u\n", fragment, oldcount, newcount);
474 uspi = UFS_SB(sb)->s_uspi;
475 usb1 = ubh_get_usb_first (uspi);
476 count = newcount - oldcount;
/* Quick rejects: not enough free fragments in this group, or the grown
 * run would cross a block boundary. */
478 cgno = ufs_dtog(fragment);
479 if (fs32_to_cpu(sb, UFS_SB(sb)->fs_cs(cgno).cs_nffree) < count)
481 if ((ufs_fragnum (fragment) + newcount) > uspi->s_fpb)
483 ucpi = ufs_load_cylinder (sb, cgno);
486 ucg = ubh_get_ucg (UCPI_UBH(ucpi));
487 if (!ufs_cg_chkmagic(sb, ucg)) {
488 ufs_panic (sb, "ufs_add_fragments",
489 "internal error, bad magic number on cg %u", cgno);
/* Every fragment we want to grow into must currently be free. */
493 fragno = ufs_dtogd (fragment);
494 fragoff = ufs_fragnum (fragno);
495 for (i = oldcount; i < newcount; i++)
496 if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i))
/*
 * Block can be extended
 */
501 ucg->cg_time = cpu_to_fs32(sb, get_seconds());
/* Measure the free run that followed the old fragments, so the cg_frsum
 * histogram can be moved from the old run length to the shorter one. */
502 for (i = newcount; i < (uspi->s_fpb - fragoff); i++)
503 if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i))
505 fragsize = i - oldcount;
506 if (!fs32_to_cpu(sb, ucg->cg_frsum[fragsize]))
507 ufs_panic (sb, "ufs_add_fragments",
508 "internal error or corrupted bitmap on cg %u", cgno);
509 fs32_sub(sb, &ucg->cg_frsum[fragsize], 1);
510 if (fragsize != count)
511 fs32_add(sb, &ucg->cg_frsum[fragsize - count], 1);
/* Mark the new fragments allocated and charge them to the quota. */
512 for (i = oldcount; i < newcount; i++)
513 ubh_clrbit (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i);
514 if(DQUOT_ALLOC_BLOCK(inode, count)) {
/* Free-fragment counters drop at all three accounting levels. */
519 fs32_sub(sb, &ucg->cg_cs.cs_nffree, count);
520 fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
521 fs32_sub(sb, &usb1->fs_cstotal.cs_nffree, count);
523 ubh_mark_buffer_dirty (USPI_UBH(uspi));
524 ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
525 if (sb->s_flags & MS_SYNCHRONOUS) {
526 ubh_ll_rw_block (SWRITE, 1, (struct ufs_buffer_head **)&ucpi);
527 ubh_wait_on_buffer (UCPI_UBH(ucpi));
531 UFSD("EXIT, fragment %u\n", fragment);
/*
 * UFS_TEST_FREE_SPACE_CG: test whether cylinder group 'cgno' can satisfy
 * the current allocation -- either a whole free block (cs_nbfree) or a
 * free fragment run of length >= count (cg_frsum[k] for k >= count).
 * The trailing backslashes show the macro continues on lines missing from
 * this extraction (presumably goto targets on success).
 */
536 #define UFS_TEST_FREE_SPACE_CG \
537 ucg = (struct ufs_cylinder_group *) UFS_SB(sb)->s_ucg[cgno]->b_data; \
538 if (fs32_to_cpu(sb, ucg->cg_cs.cs_nbfree)) \
540 for (k = count; k < uspi->s_fpb; k++) \
541 if (fs32_to_cpu(sb, ucg->cg_frsum[k])) \
/*
 * Allocate 'count' fragments near 'goal', searching cylinder groups in
 * the classic FFS order: preferred group, quadratic rehash, then brute
 * force.  Returns the absolute fragment number of the allocation.
 * NOTE(review): lossy extraction -- labels, braces and some return paths
 * are missing from this view.
 */
544 static unsigned ufs_alloc_fragments (struct inode * inode, unsigned cgno,
545 unsigned goal, unsigned count, int * err)
547 struct super_block * sb;
548 struct ufs_sb_private_info * uspi;
549 struct ufs_super_block_first * usb1;
550 struct ufs_cg_private_info * ucpi;
551 struct ufs_cylinder_group * ucg;
552 unsigned oldcg, i, j, k, result, allocsize;
554 UFSD("ENTER, ino %lu, cgno %u, goal %u, count %u\n", inode->i_ino, cgno, goal, count);
557 uspi = UFS_SB(sb)->s_uspi;
558 usb1 = ubh_get_usb_first(uspi);
/*
 * 1. searching on preferred cylinder group
 */
564 UFS_TEST_FREE_SPACE_CG
/*
 * 2. quadratic rehash
 */
569 for (j = 1; j < uspi->s_ncg; j *= 2) {
571 if (cgno >= uspi->s_ncg)
573 UFS_TEST_FREE_SPACE_CG
/*
 * 3. brute force search
 * We start at i = 2 ( 0 is checked at 1.step, 1 at 2.step )
 */
580 cgno = (oldcg + 1) % uspi->s_ncg;
581 for (j = 2; j < uspi->s_ncg; j++) {
583 if (cgno >= uspi->s_ncg)
585 UFS_TEST_FREE_SPACE_CG
588 UFSD("EXIT (FAILED)\n");
/* A group with enough space was found (its label is not visible here). */
592 ucpi = ufs_load_cylinder (sb, cgno);
595 ucg = ubh_get_ucg (UCPI_UBH(ucpi));
596 if (!ufs_cg_chkmagic(sb, ucg))
597 ufs_panic (sb, "ufs_alloc_fragments",
598 "internal error, bad magic number on cg %u", cgno);
599 ucg->cg_time = cpu_to_fs32(sb, get_seconds());
/* Whole-block request: allocate a block directly. */
601 if (count == uspi->s_fpb) {
602 result = ufs_alloccg_block (inode, ucpi, goal, err);
603 if (result == (unsigned)-1)
/* Find the smallest existing free-run size >= count in this group. */
608 for (allocsize = count; allocsize < uspi->s_fpb; allocsize++)
609 if (fs32_to_cpu(sb, ucg->cg_frsum[allocsize]) != 0)
/*
 * No suitable partial run exists: take a whole block, keep the first
 * 'count' fragments and give the tail back as a free fragment run.
 */
612 if (allocsize == uspi->s_fpb) {
613 result = ufs_alloccg_block (inode, ucpi, goal, err);
614 if (result == (unsigned)-1)
616 goal = ufs_dtogd (result);
617 for (i = count; i < uspi->s_fpb; i++)
618 ubh_setbit (UCPI_UBH(ucpi), ucpi->c_freeoff, goal + i);
619 i = uspi->s_fpb - count;
620 DQUOT_FREE_BLOCK(inode, i);
622 fs32_add(sb, &ucg->cg_cs.cs_nffree, i);
623 fs32_add(sb, &usb1->fs_cstotal.cs_nffree, i);
624 fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, i);
625 fs32_add(sb, &ucg->cg_frsum[i], 1);
/* Otherwise locate a run of 'allocsize' in the bitmap and take 'count'
 * fragments off the front of it. */
629 result = ufs_bitmap_search (sb, ucpi, goal, allocsize);
630 if (result == (unsigned)-1)
632 if(DQUOT_ALLOC_BLOCK(inode, count)) {
636 for (i = 0; i < count; i++)
637 ubh_clrbit (UCPI_UBH(ucpi), ucpi->c_freeoff, result + i);
639 fs32_sub(sb, &ucg->cg_cs.cs_nffree, count);
640 fs32_sub(sb, &usb1->fs_cstotal.cs_nffree, count);
641 fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
642 fs32_sub(sb, &ucg->cg_frsum[allocsize], 1);
/* The leftover of the run becomes a shorter free run in the histogram. */
644 if (count != allocsize)
645 fs32_add(sb, &ucg->cg_frsum[allocsize - count], 1);
648 ubh_mark_buffer_dirty (USPI_UBH(uspi));
649 ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
650 if (sb->s_flags & MS_SYNCHRONOUS) {
651 ubh_ll_rw_block (SWRITE, 1, (struct ufs_buffer_head **)&ucpi);
652 ubh_wait_on_buffer (UCPI_UBH(ucpi));
/* Convert the group-relative fragment number to an absolute one. */
656 result += cgno * uspi->s_fpg;
657 UFSD("EXIT3, result %u\n", result);
/*
 * Allocate one whole block in cylinder group 'ucpi': use the goal block
 * if it is free, otherwise search near it with ufs_bitmap_search(), and
 * remember the winning position in the group's rotor.
 * NOTE(review): lossy extraction -- the conditions guarding the visible
 * assignments (e.g. the "no goal" test) are in missing lines.
 */
661 static unsigned ufs_alloccg_block (struct inode * inode,
662 struct ufs_cg_private_info * ucpi, unsigned goal, int * err)
664 struct super_block * sb;
665 struct ufs_sb_private_info * uspi;
666 struct ufs_super_block_first * usb1;
667 struct ufs_cylinder_group * ucg;
668 unsigned result, cylno, blkno;
670 UFSD("ENTER, goal %u\n", goal);
673 uspi = UFS_SB(sb)->s_uspi;
674 usb1 = ubh_get_usb_first(uspi);
675 ucg = ubh_get_ucg(UCPI_UBH(ucpi));
/* Presumably taken when no goal was supplied: continue from the rotor. */
678 goal = ucpi->c_rotor;
/* Round the goal down to a block boundary, group-relative. */
681 goal = ufs_blknum (goal);
682 goal = ufs_dtogd (goal);
/*
 * If the requested block is available, use it.
 */
687 if (ubh_isblockset(UCPI_UBH(ucpi), ucpi->c_freeoff, ufs_fragstoblks(goal))) {
693 result = ufs_bitmap_search (sb, ucpi, goal, uspi->s_fpb);
694 if (result == (unsigned)-1)
696 ucpi->c_rotor = result;
/* Claim the block; update cluster map, quota, and free-block counters. */
698 blkno = ufs_fragstoblks(result);
699 ubh_clrblock (UCPI_UBH(ucpi), ucpi->c_freeoff, blkno);
700 if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
701 ufs_clusteracct (sb, ucpi, blkno, -1);
702 if(DQUOT_ALLOC_BLOCK(inode, uspi->s_fpb)) {
707 fs32_sub(sb, &ucg->cg_cs.cs_nbfree, 1);
708 fs32_sub(sb, &usb1->fs_cstotal.cs_nbfree, 1);
709 fs32_sub(sb, &UFS_SB(sb)->fs_cs(ucpi->c_cgx).cs_nbfree, 1);
/* Per-cylinder rotational-position bookkeeping. */
710 cylno = ufs_cbtocylno(result);
711 fs16_sub(sb, &ubh_cg_blks(ucpi, cylno, ufs_cbtorpos(result)), 1);
712 fs32_sub(sb, &ubh_cg_blktot(ucpi, cylno), 1);
714 UFSD("EXIT, result %u\n", result);
/*
 * Scan 'size' bytes of a bitmap held in the multi-buffer head 'ubh',
 * starting at byte offset 'begin', for a byte b with (table[b] & mask)
 * non-zero -- the BSD scanc() primitive adapted to split buffers.
 * The visible return computes size + rest; interpretation of the exact
 * return convention depends on lines missing from this extraction.
 */
719 static unsigned ubh_scanc(struct ufs_sb_private_info *uspi,
720 struct ufs_buffer_head *ubh,
721 unsigned begin, unsigned size,
722 unsigned char *table, unsigned char mask)
724 unsigned rest, offset;
/* Split 'begin' into a buffer index ('begin') and an in-buffer offset. */
728 offset = begin & ~uspi->s_fmask;
729 begin >>= uspi->s_fshift;
/* 'rest' = how many bytes of the current buffer this round may scan. */
731 if ((offset + size) < uspi->s_fsize)
734 rest = uspi->s_fsize - offset;
/* Byte-wise scan: stop on the first byte matching the table/mask test. */
736 cp = ubh->bh[begin]->b_data + offset;
737 while ((table[*cp++] & mask) == 0 && --rest)
744 return (size + rest);
/**
 * ufs_bitmap_search - find a free run of the specified size in a cylinder group.
 * @sb: pointer to super block
 * @ucpi: pointer to cylinder group info
 * @goal: near which block we want to find a new one
 * @count: specified size (in fragments)
 */
754 static unsigned ufs_bitmap_search(struct super_block *sb,
755 struct ufs_cg_private_info *ucpi,
756 unsigned goal, unsigned count)
/*
 * Bit patterns for identifying fragments in the block map,
 * used as ((map & mask_arr) == want_arr).
 */
762 static const int mask_arr[9] = {
763 0x3, 0x7, 0xf, 0x1f, 0x3f, 0x7f, 0xff, 0x1ff, 0x3ff
765 static const int want_arr[9] = {
766 0x0, 0x2, 0x6, 0xe, 0x1e, 0x3e, 0x7e, 0xfe, 0x1fe
768 struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
769 struct ufs_super_block_first *usb1;
770 struct ufs_cylinder_group *ucg;
771 unsigned start, length, loc, result;
772 unsigned pos, want, blockmap, mask, end;
774 UFSD("ENTER, cg %u, goal %u, count %u\n", ucpi->c_cgx, goal, count);
776 usb1 = ubh_get_usb_first (uspi);
777 ucg = ubh_get_ucg(UCPI_UBH(ucpi));
/* Start the byte-wise scan at the goal, else at the group's rotor. */
780 start = ufs_dtogd(goal) >> 3;
782 start = ucpi->c_frotor >> 3;
/* Pass 1: scan from 'start' to the end of the map with ubh_scanc(),
 * using the fragtable matched to the fragments-per-block geometry. */
784 length = ((uspi->s_fpg + 7) >> 3) - start;
785 loc = ubh_scanc(uspi, UCPI_UBH(ucpi), ucpi->c_freeoff + start, length,
786 (uspi->s_fpb == 8) ? ufs_fragtable_8fpb : ufs_fragtable_other,
787 1 << (count - 1 + (uspi->s_fpb & 7)));
/* Pass 2: wrap around and scan from the beginning of the map. */
790 loc = ubh_scanc(uspi, UCPI_UBH(ucpi), ucpi->c_freeoff, length,
791 (uspi->s_fpb == 8) ? ufs_fragtable_8fpb :
793 1 << (count - 1 + (uspi->s_fpb & 7)));
/* Both passes failed even though the summary said space exists. */
795 ufs_error(sb, "ufs_bitmap_search",
796 "bitmap corrupted on cg %u, start %u,"
797 " length %u, count %u, freeoff %u\n",
798 ucpi->c_cgx, start, length, count,
/* Convert the scanc result back to a fragment number; cache as rotor. */
804 result = (start + length - loc) << 3;
805 ucpi->c_frotor = result;
/*
 * Found the byte in the map; now locate the exact run within it using
 * the mask/want pattern tables declared above.
 */
811 for (end = result + 8; result < end; result += uspi->s_fpb) {
812 blockmap = ubh_blkmap(UCPI_UBH(ucpi), ucpi->c_freeoff, result);
814 mask = mask_arr[count];
815 want = want_arr[count];
816 for (pos = 0; pos <= uspi->s_fpb - count; pos++) {
817 if ((blockmap & mask) == want) {
818 UFSD("EXIT, result %u\n", result);
/* The byte-level table said a run exists but the bit-level search
 * could not find it: the map is inconsistent. */
826 ufs_error(sb, "ufs_bitmap_search", "block not in map on cg %u\n",
828 UFSD("EXIT (FAILED)\n");
/*
 * Maintain the contiguous-cluster bitmap and the cluster-size summary
 * counters for a block being freed or allocated ('cnt' is +1 or -1 at the
 * visible call sites).  No-op unless s_contigsumsize > 0.
 * NOTE(review): lossy extraction -- the conditions guarding setbit vs
 * clrbit and the forw/back assignments are in missing lines.
 */
832 static void ufs_clusteracct(struct super_block * sb,
833 struct ufs_cg_private_info * ucpi, unsigned blkno, int cnt)
835 struct ufs_sb_private_info * uspi;
836 int i, start, end, forw, back;
838 uspi = UFS_SB(sb)->s_uspi;
839 if (uspi->s_contigsumsize <= 0)
/* Flip the block's bit in the cluster map -- presumably set when freeing
 * and clear when allocating; the guarding branch is not visible here. */
843 ubh_setbit(UCPI_UBH(ucpi), ucpi->c_clusteroff, blkno);
845 ubh_clrbit(UCPI_UBH(ucpi), ucpi->c_clusteroff, blkno);
/*
 * Find the size of the cluster going forward.
 */
851 end = start + uspi->s_contigsumsize;
852 if ( end >= ucpi->c_nclusterblks)
853 end = ucpi->c_nclusterblks;
854 i = ubh_find_next_zero_bit (UCPI_UBH(ucpi), ucpi->c_clusteroff, end, start);
/*
 * Find the size of the cluster going backward.
 */
863 end = start - uspi->s_contigsumsize;
866 i = ubh_find_last_zero_bit (UCPI_UBH(ucpi), ucpi->c_clusteroff, start, end);
/*
 * Account for the old cluster and the possibly new forward and
 * backward clusters; cluster sizes are capped at s_contigsumsize.
 */
876 if (i > uspi->s_contigsumsize)
877 i = uspi->s_contigsumsize;
878 fs32_add(sb, (__fs32*)ubh_get_addr(UCPI_UBH(ucpi), ucpi->c_clustersumoff + (i << 2)), cnt);
880 fs32_sub(sb, (__fs32*)ubh_get_addr(UCPI_UBH(ucpi), ucpi->c_clustersumoff + (back << 2)), cnt);
882 fs32_sub(sb, (__fs32*)ubh_get_addr(UCPI_UBH(ucpi), ucpi->c_clustersumoff + (forw << 2)), cnt);
/*
 * Fragment-pattern lookup table for filesystems with 8 fragments per
 * block, indexed by one byte of the free-fragment map.  ubh_scanc() tests
 * entries with (table[byte] & (1 << (count - 1))), so bit (n-1) of an
 * entry apparently flags "this byte contains a free run of length n" --
 * confirm against the BSD ffs fragtbl tables.
 */
886 static unsigned char ufs_fragtable_8fpb[] = {
887 0x00, 0x01, 0x01, 0x02, 0x01, 0x01, 0x02, 0x04, 0x01, 0x01, 0x01, 0x03, 0x02, 0x03, 0x04, 0x08,
888 0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x02, 0x03, 0x03, 0x02, 0x04, 0x05, 0x08, 0x10,
889 0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
890 0x02, 0x03, 0x03, 0x02, 0x03, 0x03, 0x02, 0x06, 0x04, 0x05, 0x05, 0x06, 0x08, 0x09, 0x10, 0x20,
891 0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
892 0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x03, 0x03, 0x03, 0x03, 0x05, 0x05, 0x09, 0x11,
893 0x02, 0x03, 0x03, 0x02, 0x03, 0x03, 0x02, 0x06, 0x03, 0x03, 0x03, 0x03, 0x02, 0x03, 0x06, 0x0A,
894 0x04, 0x05, 0x05, 0x06, 0x05, 0x05, 0x06, 0x04, 0x08, 0x09, 0x09, 0x0A, 0x10, 0x11, 0x20, 0x40,
895 0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
896 0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x03, 0x03, 0x03, 0x03, 0x05, 0x05, 0x09, 0x11,
897 0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
898 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x07, 0x05, 0x05, 0x05, 0x07, 0x09, 0x09, 0x11, 0x21,
899 0x02, 0x03, 0x03, 0x02, 0x03, 0x03, 0x02, 0x06, 0x03, 0x03, 0x03, 0x03, 0x02, 0x03, 0x06, 0x0A,
900 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x07, 0x02, 0x03, 0x03, 0x02, 0x06, 0x07, 0x0A, 0x12,
901 0x04, 0x05, 0x05, 0x06, 0x05, 0x05, 0x06, 0x04, 0x05, 0x05, 0x05, 0x07, 0x06, 0x07, 0x04, 0x0C,
902 0x08, 0x09, 0x09, 0x0A, 0x09, 0x09, 0x0A, 0x0C, 0x10, 0x11, 0x11, 0x12, 0x20, 0x21, 0x40, 0x80,
/*
 * Fragment-pattern lookup table for all other fragments-per-block
 * geometries; selected in ufs_bitmap_search() whenever s_fpb != 8 and
 * used with the shifted mask 1 << (count - 1 + (s_fpb & 7)).
 * (The closing brace lies beyond the end of this extraction.)
 */
905 static unsigned char ufs_fragtable_other[] = {
906 0x00, 0x16, 0x16, 0x2A, 0x16, 0x16, 0x26, 0x4E, 0x16, 0x16, 0x16, 0x3E, 0x2A, 0x3E, 0x4E, 0x8A,
907 0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
908 0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
909 0x2A, 0x3E, 0x3E, 0x2A, 0x3E, 0x3E, 0x2E, 0x6E, 0x3E, 0x3E, 0x3E, 0x3E, 0x2A, 0x3E, 0x6E, 0xAA,
910 0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
911 0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
912 0x26, 0x36, 0x36, 0x2E, 0x36, 0x36, 0x26, 0x6E, 0x36, 0x36, 0x36, 0x3E, 0x2E, 0x3E, 0x6E, 0xAE,
913 0x4E, 0x5E, 0x5E, 0x6E, 0x5E, 0x5E, 0x6E, 0x4E, 0x5E, 0x5E, 0x5E, 0x7E, 0x6E, 0x7E, 0x4E, 0xCE,
914 0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
915 0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
916 0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
917 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0xBE,
918 0x2A, 0x3E, 0x3E, 0x2A, 0x3E, 0x3E, 0x2E, 0x6E, 0x3E, 0x3E, 0x3E, 0x3E, 0x2A, 0x3E, 0x6E, 0xAA,
919 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0xBE,
920 0x4E, 0x5E, 0x5E, 0x6E, 0x5E, 0x5E, 0x6E, 0x4E, 0x5E, 0x5E, 0x5E, 0x7E, 0x6E, 0x7E, 0x4E, 0xCE,
921 0x8A, 0x9E, 0x9E, 0xAA, 0x9E, 0x9E, 0xAE, 0xCE, 0x9E, 0x9E, 0x9E, 0xBE, 0xAA, 0xBE, 0xCE, 0x8A,