/*
 * fs/ufs/balloc.c — snapshot from the karo-tx-linux git tree
 * (web-viewer capture; tree state: "ufs: make ufs_freespace() return signed")
 */
1 /*
2  *  linux/fs/ufs/balloc.c
3  *
4  * Copyright (C) 1998
5  * Daniel Pirkl <daniel.pirkl@email.cz>
6  * Charles University, Faculty of Mathematics and Physics
7  *
8  * UFS2 write support Evgeniy Dushistov <dushistov@mail.ru>, 2007
9  */
10
11 #include <linux/fs.h>
12 #include <linux/stat.h>
13 #include <linux/time.h>
14 #include <linux/string.h>
15 #include <linux/buffer_head.h>
16 #include <linux/capability.h>
17 #include <linux/bitops.h>
18 #include <linux/bio.h>
19 #include <asm/byteorder.h>
20
21 #include "ufs_fs.h"
22 #include "ufs.h"
23 #include "swab.h"
24 #include "util.h"
25
26 #define INVBLOCK ((u64)-1L)
27
28 static u64 ufs_add_fragments(struct inode *, u64, unsigned, unsigned);
29 static u64 ufs_alloc_fragments(struct inode *, unsigned, u64, unsigned, int *);
30 static u64 ufs_alloccg_block(struct inode *, struct ufs_cg_private_info *, u64, int *);
31 static u64 ufs_bitmap_search (struct super_block *, struct ufs_cg_private_info *, u64, unsigned);
32 static unsigned char ufs_fragtable_8fpb[], ufs_fragtable_other[];
33 static void ufs_clusteracct(struct super_block *, struct ufs_cg_private_info *, unsigned, int);
34
35 /*
36  * Free 'count' fragments from fragment number 'fragment'
37  */
void ufs_free_fragments(struct inode *inode, u64 fragment, unsigned count)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_cg_private_info * ucpi;
	struct ufs_cylinder_group * ucg;
	unsigned cgno, bit, end_bit, bbase, blkmap, i;
	u64 blkno;

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;

	UFSD("ENTER, fragment %llu, count %u\n",
	     (unsigned long long)fragment, count);

	/* Sanity check on the requested run (historical check against
	 * s_fpg, fragments per cylinder group). */
	if (ufs_fragnum(fragment) + count > uspi->s_fpg)
		ufs_error (sb, "ufs_free_fragments", "internal error");

	mutex_lock(&UFS_SB(sb)->s_lock);

	/* Locate the cylinder group and the bit offset within its
	 * free-fragment bitmap. */
	cgno = ufs_dtog(uspi, fragment);
	bit = ufs_dtogd(uspi, fragment);
	if (cgno >= uspi->s_ncg) {
		ufs_panic (sb, "ufs_free_fragments", "freeing blocks are outside device");
		goto failed;
	}

	ucpi = ufs_load_cylinder (sb, cgno);
	if (!ucpi)
		goto failed;
	ucg = ubh_get_ucg (UCPI_UBH(ucpi));
	if (!ufs_cg_chkmagic(sb, ucg)) {
		ufs_panic (sb, "ufs_free_fragments", "internal error, bad magic number on cg %u", cgno);
		goto failed;
	}

	end_bit = bit + count;
	bbase = ufs_blknum (bit);
	/*
	 * Remove the containing block's old free-map from the per-size
	 * free-run counters (cg_frsum) before touching any bits; it is
	 * re-added below once the bits reflect the new state.
	 */
	blkmap = ubh_blkmap (UCPI_UBH(ucpi), ucpi->c_freeoff, bbase);
	ufs_fragacct (sb, blkmap, ucg->cg_frsum, -1);
	/* In this bitmap a set bit means "free": mark each fragment free,
	 * complaining if it was free already (double free). */
	for (i = bit; i < end_bit; i++) {
		if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_freeoff, i))
			ubh_setbit (UCPI_UBH(ucpi), ucpi->c_freeoff, i);
		else
			ufs_error (sb, "ufs_free_fragments",
				   "bit already cleared for fragment %u", i);
	}

	inode_sub_bytes(inode, count << uspi->s_fshift);
	/* Per-cg, filesystem-total and cached free-fragment counters. */
	fs32_add(sb, &ucg->cg_cs.cs_nffree, count);
	uspi->cs_total.cs_nffree += count;
	fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
	/* Re-account the block's new free-map in cg_frsum. */
	blkmap = ubh_blkmap (UCPI_UBH(ucpi), ucpi->c_freeoff, bbase);
	ufs_fragacct(sb, blkmap, ucg->cg_frsum, 1);

	/*
	 * Trying to reassemble free fragments into block
	 */
	blkno = ufs_fragstoblks (bbase);
	if (ubh_isblockset(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno)) {
		/* The whole containing block is free now: convert the
		 * fragment-level accounting into one free block. */
		fs32_sub(sb, &ucg->cg_cs.cs_nffree, uspi->s_fpb);
		uspi->cs_total.cs_nffree -= uspi->s_fpb;
		fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, uspi->s_fpb);
		if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
			ufs_clusteracct (sb, ucpi, blkno, 1);
		fs32_add(sb, &ucg->cg_cs.cs_nbfree, 1);
		uspi->cs_total.cs_nbfree++;
		fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nbfree, 1);
		if (uspi->fs_magic != UFS2_MAGIC) {
			/* UFS1 also keeps per-cylinder rotational-position
			 * block statistics. */
			unsigned cylno = ufs_cbtocylno (bbase);

			fs16_add(sb, &ubh_cg_blks(ucpi, cylno,
						  ufs_cbtorpos(bbase)), 1);
			fs32_add(sb, &ubh_cg_blktot(ucpi, cylno), 1);
		}
	}

	ubh_mark_buffer_dirty (USPI_UBH(uspi));
	ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
	if (sb->s_flags & MS_SYNCHRONOUS)
		ubh_sync_block(UCPI_UBH(ucpi));
	ufs_mark_sb_dirty(sb);

	mutex_unlock(&UFS_SB(sb)->s_lock);
	UFSD("EXIT\n");
	return;

failed:
	mutex_unlock(&UFS_SB(sb)->s_lock);
	UFSD("EXIT (FAILED)\n");
	return;
}
130
131 /*
132  * Free 'count' fragments from fragment number 'fragment' (free whole blocks)
133  */
void ufs_free_blocks(struct inode *inode, u64 fragment, unsigned count)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_cg_private_info * ucpi;
	struct ufs_cylinder_group * ucg;
	unsigned overflow, cgno, bit, end_bit, i;
	u64 blkno;

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;

	UFSD("ENTER, fragment %llu, count %u\n",
	     (unsigned long long)fragment, count);

	/* Both the start and the length must be whole-block aligned. */
	if ((fragment & uspi->s_fpbmask) || (count & uspi->s_fpbmask)) {
		ufs_error (sb, "ufs_free_blocks", "internal error, "
			   "fragment %llu, count %u\n",
			   (unsigned long long)fragment, count);
		goto failed;
	}

	mutex_lock(&UFS_SB(sb)->s_lock);

do_more:
	/* Process one cylinder group per pass; if the run crosses a group
	 * boundary, 'overflow' holds the remainder for the next pass. */
	overflow = 0;
	cgno = ufs_dtog(uspi, fragment);
	bit = ufs_dtogd(uspi, fragment);
	if (cgno >= uspi->s_ncg) {
		ufs_panic (sb, "ufs_free_blocks", "freeing blocks are outside device");
		goto failed_unlock;
	}
	end_bit = bit + count;
	if (end_bit > uspi->s_fpg) {
		overflow = bit + count - uspi->s_fpg;
		count -= overflow;
		end_bit -= overflow;
	}

	ucpi = ufs_load_cylinder (sb, cgno);
	if (!ucpi)
		goto failed_unlock;
	ucg = ubh_get_ucg (UCPI_UBH(ucpi));
	if (!ufs_cg_chkmagic(sb, ucg)) {
		ufs_panic (sb, "ufs_free_blocks", "internal error, bad magic number on cg %u", cgno);
		goto failed_unlock;
	}

	/* Free one whole block per iteration. */
	for (i = bit; i < end_bit; i += uspi->s_fpb) {
		blkno = ufs_fragstoblks(i);
		/* A block whose bit is already set is already free. */
		if (ubh_isblockset(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno)) {
			ufs_error(sb, "ufs_free_blocks", "freeing free fragment");
		}
		ubh_setblock(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno);
		inode_sub_bytes(inode, uspi->s_fpb << uspi->s_fshift);
		if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
			ufs_clusteracct (sb, ucpi, blkno, 1);

		/* Per-cg, filesystem-total and cached free-block counters. */
		fs32_add(sb, &ucg->cg_cs.cs_nbfree, 1);
		uspi->cs_total.cs_nbfree++;
		fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nbfree, 1);

		if (uspi->fs_magic != UFS2_MAGIC) {
			/* UFS1 also keeps per-cylinder rotational-position
			 * block statistics. */
			unsigned cylno = ufs_cbtocylno(i);

			fs16_add(sb, &ubh_cg_blks(ucpi, cylno,
						  ufs_cbtorpos(i)), 1);
			fs32_add(sb, &ubh_cg_blktot(ucpi, cylno), 1);
		}
	}

	ubh_mark_buffer_dirty (USPI_UBH(uspi));
	ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
	if (sb->s_flags & MS_SYNCHRONOUS)
		ubh_sync_block(UCPI_UBH(ucpi));

	if (overflow) {
		fragment += count;
		count = overflow;
		goto do_more;
	}

	ufs_mark_sb_dirty(sb);
	mutex_unlock(&UFS_SB(sb)->s_lock);
	UFSD("EXIT\n");
	return;

failed_unlock:
	mutex_unlock(&UFS_SB(sb)->s_lock);
failed:
	UFSD("EXIT (FAILED)\n");
	return;
}
227
228 /*
229  * Modify inode page cache in such way:
230  * have - blocks with b_blocknr equal to oldb...oldb+count-1
231  * get - blocks with b_blocknr equal to newb...newb+count-1
232  * also we suppose that oldb...oldb+count-1 blocks
233  * situated at the end of file.
234  *
235  * We can come here from ufs_writepage or ufs_prepare_write,
236  * locked_page is argument of these functions, so we already lock it.
237  */
static void ufs_change_blocknr(struct inode *inode, sector_t beg,
			       unsigned int count, sector_t oldb,
			       sector_t newb, struct page *locked_page)
{
	const unsigned blks_per_page =
		1 << (PAGE_SHIFT - inode->i_blkbits);
	const unsigned mask = blks_per_page - 1;
	struct address_space * const mapping = inode->i_mapping;
	pgoff_t index, cur_index, last_index;
	unsigned pos, j, lblock;
	sector_t end, i;
	struct page *page;
	struct buffer_head *head, *bh;

	UFSD("ENTER, ino %lu, count %u, oldb %llu, newb %llu\n",
	      inode->i_ino, count,
	     (unsigned long long)oldb, (unsigned long long)newb);

	BUG_ON(!locked_page);
	BUG_ON(!PageLocked(locked_page));

	cur_index = locked_page->index;
	end = count + beg;
	last_index = end >> (PAGE_SHIFT - inode->i_blkbits);
	/* Walk page by page: each iteration jumps i to the first block
	 * of the following page. */
	for (i = beg; i < end; i = (i | mask) + 1) {
		index = i >> (PAGE_SHIFT - inode->i_blkbits);

		/* locked_page is already locked by the caller; every other
		 * page must be looked up and locked here. */
		if (likely(cur_index != index)) {
			page = ufs_get_locked_page(mapping, index);
			if (!page)/* it was truncated */
				continue;
			if (IS_ERR(page)) {/* or EIO */
				ufs_error(inode->i_sb, __func__,
					  "read of page %llu failed\n",
					  (unsigned long long)index);
				continue;
			}
		} else
			page = locked_page;

		/* Advance to the buffer head of block i within this page. */
		head = page_buffers(page);
		bh = head;
		pos = i & mask;
		for (j = 0; j < pos; ++j)
			bh = bh->b_this_page;


		/* The last page of the range may be only partially covered. */
		if (unlikely(index == last_index))
			lblock = end & mask;
		else
			lblock = blks_per_page;

		do {
			if (j >= lblock)
				break;
			/* pos: offset of this block within the whole run. */
			pos = (i - beg) + j;

			/* Make sure the buffer holds the old block's data
			 * before repointing it. */
			if (!buffer_mapped(bh))
					map_bh(bh, inode->i_sb, oldb + pos);
			if (!buffer_uptodate(bh)) {
				ll_rw_block(REQ_OP_READ, 0, 1, &bh);
				wait_on_buffer(bh);
				if (!buffer_uptodate(bh)) {
					ufs_error(inode->i_sb, __func__,
						  "read of block failed\n");
					break;
				}
			}

			UFSD(" change from %llu to %llu, pos %u\n",
			     (unsigned long long)(pos + oldb),
			     (unsigned long long)(pos + newb), pos);

			/* Repoint the buffer at the new on-disk location,
			 * drop any stale alias of it in the bdev cache and
			 * schedule a write-out at the new address. */
			bh->b_blocknr = newb + pos;
			clean_bdev_bh_alias(bh);
			mark_buffer_dirty(bh);
			++j;
			bh = bh->b_this_page;
		} while (bh != head);

		if (likely(cur_index != index))
			ufs_put_locked_page(page);
	}
	UFSD("EXIT\n");
}
323
324 static void ufs_clear_frags(struct inode *inode, sector_t beg, unsigned int n,
325                             int sync)
326 {
327         struct buffer_head *bh;
328         sector_t end = beg + n;
329
330         for (; beg < end; ++beg) {
331                 bh = sb_getblk(inode->i_sb, beg);
332                 lock_buffer(bh);
333                 memset(bh->b_data, 0, inode->i_sb->s_blocksize);
334                 set_buffer_uptodate(bh);
335                 mark_buffer_dirty(bh);
336                 unlock_buffer(bh);
337                 if (IS_SYNC(inode) || sync)
338                         sync_dirty_buffer(bh);
339                 brelse(bh);
340         }
341 }
342
343 u64 ufs_new_fragments(struct inode *inode, void *p, u64 fragment,
344                            u64 goal, unsigned count, int *err,
345                            struct page *locked_page)
346 {
347         struct super_block * sb;
348         struct ufs_sb_private_info * uspi;
349         struct ufs_super_block_first * usb1;
350         unsigned cgno, oldcount, newcount;
351         u64 tmp, request, result;
352         
353         UFSD("ENTER, ino %lu, fragment %llu, goal %llu, count %u\n",
354              inode->i_ino, (unsigned long long)fragment,
355              (unsigned long long)goal, count);
356         
357         sb = inode->i_sb;
358         uspi = UFS_SB(sb)->s_uspi;
359         usb1 = ubh_get_usb_first(uspi);
360         *err = -ENOSPC;
361
362         mutex_lock(&UFS_SB(sb)->s_lock);
363         tmp = ufs_data_ptr_to_cpu(sb, p);
364
365         if (count + ufs_fragnum(fragment) > uspi->s_fpb) {
366                 ufs_warning(sb, "ufs_new_fragments", "internal warning"
367                             " fragment %llu, count %u",
368                             (unsigned long long)fragment, count);
369                 count = uspi->s_fpb - ufs_fragnum(fragment); 
370         }
371         oldcount = ufs_fragnum (fragment);
372         newcount = oldcount + count;
373
374         /*
375          * Somebody else has just allocated our fragments
376          */
377         if (oldcount) {
378                 if (!tmp) {
379                         ufs_error(sb, "ufs_new_fragments", "internal error, "
380                                   "fragment %llu, tmp %llu\n",
381                                   (unsigned long long)fragment,
382                                   (unsigned long long)tmp);
383                         mutex_unlock(&UFS_SB(sb)->s_lock);
384                         return INVBLOCK;
385                 }
386                 if (fragment < UFS_I(inode)->i_lastfrag) {
387                         UFSD("EXIT (ALREADY ALLOCATED)\n");
388                         mutex_unlock(&UFS_SB(sb)->s_lock);
389                         return 0;
390                 }
391         }
392         else {
393                 if (tmp) {
394                         UFSD("EXIT (ALREADY ALLOCATED)\n");
395                         mutex_unlock(&UFS_SB(sb)->s_lock);
396                         return 0;
397                 }
398         }
399
400         /*
401          * There is not enough space for user on the device
402          */
403         if (!capable(CAP_SYS_RESOURCE) && ufs_freespace(uspi, UFS_MINFREE) <= 0) {
404                 mutex_unlock(&UFS_SB(sb)->s_lock);
405                 UFSD("EXIT (FAILED)\n");
406                 return 0;
407         }
408
409         if (goal >= uspi->s_size) 
410                 goal = 0;
411         if (goal == 0) 
412                 cgno = ufs_inotocg (inode->i_ino);
413         else
414                 cgno = ufs_dtog(uspi, goal);
415          
416         /*
417          * allocate new fragment
418          */
419         if (oldcount == 0) {
420                 result = ufs_alloc_fragments (inode, cgno, goal, count, err);
421                 if (result) {
422                         ufs_clear_frags(inode, result + oldcount,
423                                         newcount - oldcount, locked_page != NULL);
424                         write_seqlock(&UFS_I(inode)->meta_lock);
425                         ufs_cpu_to_data_ptr(sb, p, result);
426                         write_sequnlock(&UFS_I(inode)->meta_lock);
427                         *err = 0;
428                         UFS_I(inode)->i_lastfrag =
429                                 max(UFS_I(inode)->i_lastfrag, fragment + count);
430                 }
431                 mutex_unlock(&UFS_SB(sb)->s_lock);
432                 UFSD("EXIT, result %llu\n", (unsigned long long)result);
433                 return result;
434         }
435
436         /*
437          * resize block
438          */
439         result = ufs_add_fragments(inode, tmp, oldcount, newcount);
440         if (result) {
441                 *err = 0;
442                 UFS_I(inode)->i_lastfrag = max(UFS_I(inode)->i_lastfrag,
443                                                 fragment + count);
444                 ufs_clear_frags(inode, result + oldcount, newcount - oldcount,
445                                 locked_page != NULL);
446                 mutex_unlock(&UFS_SB(sb)->s_lock);
447                 UFSD("EXIT, result %llu\n", (unsigned long long)result);
448                 return result;
449         }
450
451         /*
452          * allocate new block and move data
453          */
454         switch (fs32_to_cpu(sb, usb1->fs_optim)) {
455             case UFS_OPTSPACE:
456                 request = newcount;
457                 if (uspi->s_minfree < 5 || uspi->cs_total.cs_nffree
458                     > uspi->s_dsize * uspi->s_minfree / (2 * 100))
459                         break;
460                 usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME);
461                 break;
462             default:
463                 usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME);
464         
465             case UFS_OPTTIME:
466                 request = uspi->s_fpb;
467                 if (uspi->cs_total.cs_nffree < uspi->s_dsize *
468                     (uspi->s_minfree - 2) / 100)
469                         break;
470                 usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME);
471                 break;
472         }
473         result = ufs_alloc_fragments (inode, cgno, goal, request, err);
474         if (result) {
475                 ufs_clear_frags(inode, result + oldcount, newcount - oldcount,
476                                 locked_page != NULL);
477                 ufs_change_blocknr(inode, fragment - oldcount, oldcount,
478                                    uspi->s_sbbase + tmp,
479                                    uspi->s_sbbase + result, locked_page);
480                 write_seqlock(&UFS_I(inode)->meta_lock);
481                 ufs_cpu_to_data_ptr(sb, p, result);
482                 write_sequnlock(&UFS_I(inode)->meta_lock);
483                 *err = 0;
484                 UFS_I(inode)->i_lastfrag = max(UFS_I(inode)->i_lastfrag,
485                                                 fragment + count);
486                 mutex_unlock(&UFS_SB(sb)->s_lock);
487                 if (newcount < request)
488                         ufs_free_fragments (inode, result + newcount, request - newcount);
489                 ufs_free_fragments (inode, tmp, oldcount);
490                 UFSD("EXIT, result %llu\n", (unsigned long long)result);
491                 return result;
492         }
493
494         mutex_unlock(&UFS_SB(sb)->s_lock);
495         UFSD("EXIT (FAILED)\n");
496         return 0;
497 }               
498
499 static bool try_add_frags(struct inode *inode, unsigned frags)
500 {
501         unsigned size = frags * i_blocksize(inode);
502         spin_lock(&inode->i_lock);
503         __inode_add_bytes(inode, size);
504         if (unlikely((u32)inode->i_blocks != inode->i_blocks)) {
505                 __inode_sub_bytes(inode, size);
506                 spin_unlock(&inode->i_lock);
507                 return false;
508         }
509         spin_unlock(&inode->i_lock);
510         return true;
511 }
512
/*
 * Try to grow an existing allocation of 'oldcount' fragments at
 * 'fragment' in place to 'newcount' fragments.  Returns 'fragment' on
 * success, 0 when the neighbouring fragments cannot be claimed.
 */
static u64 ufs_add_fragments(struct inode *inode, u64 fragment,
			     unsigned oldcount, unsigned newcount)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_cg_private_info * ucpi;
	struct ufs_cylinder_group * ucg;
	unsigned cgno, fragno, fragoff, count, fragsize, i;

	UFSD("ENTER, fragment %llu, oldcount %u, newcount %u\n",
	     (unsigned long long)fragment, oldcount, newcount);

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;
	count = newcount - oldcount;

	cgno = ufs_dtog(uspi, fragment);
	/* Give up early if the group has too few free fragments. */
	if (fs32_to_cpu(sb, UFS_SB(sb)->fs_cs(cgno).cs_nffree) < count)
		return 0;
	/* The grown run must not cross a block boundary. */
	if ((ufs_fragnum (fragment) + newcount) > uspi->s_fpb)
		return 0;
	ucpi = ufs_load_cylinder (sb, cgno);
	if (!ucpi)
		return 0;
	ucg = ubh_get_ucg (UCPI_UBH(ucpi));
	if (!ufs_cg_chkmagic(sb, ucg)) {
		ufs_panic (sb, "ufs_add_fragments",
			"internal error, bad magic number on cg %u", cgno);
		return 0;
	}

	fragno = ufs_dtogd(uspi, fragment);
	fragoff = ufs_fragnum (fragno);
	/* Every fragment we want to grow into must currently be free
	 * (a clear bit means "in use"). */
	for (i = oldcount; i < newcount; i++)
		if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i))
			return 0;

	/* Charge the new fragments to the inode; bail if i_blocks would
	 * overflow. */
	if (!try_add_frags(inode, count))
		return 0;
	/*
	 * Block can be extended
	 */
	ucg->cg_time = cpu_to_fs32(sb, get_seconds());
	/* Measure the free run that follows the current allocation. */
	for (i = newcount; i < (uspi->s_fpb - fragoff); i++)
		if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i))
			break;
	fragsize = i - oldcount;
	if (!fs32_to_cpu(sb, ucg->cg_frsum[fragsize]))
		ufs_panic (sb, "ufs_add_fragments",
			"internal error or corrupted bitmap on cg %u", cgno);
	/* The old free run disappears from cg_frsum; any leftover tail
	 * re-registers as a shorter run. */
	fs32_sub(sb, &ucg->cg_frsum[fragsize], 1);
	if (fragsize != count)
		fs32_add(sb, &ucg->cg_frsum[fragsize - count], 1);
	/* Claim the fragments (clear bit = allocated). */
	for (i = oldcount; i < newcount; i++)
		ubh_clrbit (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i);

	/* Per-cg, cached and filesystem-total free-fragment counters. */
	fs32_sub(sb, &ucg->cg_cs.cs_nffree, count);
	fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
	uspi->cs_total.cs_nffree -= count;

	ubh_mark_buffer_dirty (USPI_UBH(uspi));
	ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
	if (sb->s_flags & MS_SYNCHRONOUS)
		ubh_sync_block(UCPI_UBH(ucpi));
	ufs_mark_sb_dirty(sb);

	UFSD("EXIT, fragment %llu\n", (unsigned long long)fragment);

	return fragment;
}
583
/*
 * Probe the cylinder group selected by 'cgno' for free space: jump to
 * cg_found when it has at least one whole free block (cs_nbfree), or a
 * free fragment run of at least 'count' fragments (cg_frsum[k] counts
 * the free runs of exactly k fragments).
 */
#define UFS_TEST_FREE_SPACE_CG \
	ucg = (struct ufs_cylinder_group *) UFS_SB(sb)->s_ucg[cgno]->b_data; \
	if (fs32_to_cpu(sb, ucg->cg_cs.cs_nbfree)) \
		goto cg_found; \
	for (k = count; k < uspi->s_fpb; k++) \
		if (fs32_to_cpu(sb, ucg->cg_frsum[k])) \
			goto cg_found; 
591
/*
 * Allocate 'count' fragments near 'goal', starting the search at
 * cylinder group 'cgno'.  Returns the first fragment number of the
 * allocated run (filesystem-wide units), or 0 on failure.
 */
static u64 ufs_alloc_fragments(struct inode *inode, unsigned cgno,
			       u64 goal, unsigned count, int *err)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_cg_private_info * ucpi;
	struct ufs_cylinder_group * ucg;
	unsigned oldcg, i, j, k, allocsize;
	u64 result;

	UFSD("ENTER, ino %lu, cgno %u, goal %llu, count %u\n",
	     inode->i_ino, cgno, (unsigned long long)goal, count);

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;
	oldcg = cgno;

	/*
	 * 1. searching on preferred cylinder group
	 */
	UFS_TEST_FREE_SPACE_CG

	/*
	 * 2. quadratic rehash
	 */
	for (j = 1; j < uspi->s_ncg; j *= 2) {
		cgno += j;
		if (cgno >= uspi->s_ncg)
			cgno -= uspi->s_ncg;
		UFS_TEST_FREE_SPACE_CG
	}

	/*
	 * 3. brute force search
	 * We start at i = 2 ( 0 is checked at 1.step, 1 at 2.step )
	 */
	cgno = (oldcg + 1) % uspi->s_ncg;
	for (j = 2; j < uspi->s_ncg; j++) {
		cgno++;
		if (cgno >= uspi->s_ncg)
			cgno = 0;
		UFS_TEST_FREE_SPACE_CG
	}

	UFSD("EXIT (FAILED)\n");
	return 0;

cg_found:
	ucpi = ufs_load_cylinder (sb, cgno);
	if (!ucpi)
		return 0;
	ucg = ubh_get_ucg (UCPI_UBH(ucpi));
	if (!ufs_cg_chkmagic(sb, ucg))
		ufs_panic (sb, "ufs_alloc_fragments",
			"internal error, bad magic number on cg %u", cgno);
	ucg->cg_time = cpu_to_fs32(sb, get_seconds());

	/* Whole-block request: allocate a block directly. */
	if (count == uspi->s_fpb) {
		result = ufs_alloccg_block (inode, ucpi, goal, err);
		if (result == INVBLOCK)
			return 0;
		goto succed;
	}

	/* Find the smallest free-run size >= count present in this cg. */
	for (allocsize = count; allocsize < uspi->s_fpb; allocsize++)
		if (fs32_to_cpu(sb, ucg->cg_frsum[allocsize]) != 0)
			break;

	if (allocsize == uspi->s_fpb) {
		/* No suitable partial run: take a whole block and give the
		 * unused tail back as free fragments. */
		result = ufs_alloccg_block (inode, ucpi, goal, err);
		if (result == INVBLOCK)
			return 0;
		goal = ufs_dtogd(uspi, result);
		for (i = count; i < uspi->s_fpb; i++)
			ubh_setbit (UCPI_UBH(ucpi), ucpi->c_freeoff, goal + i);
		i = uspi->s_fpb - count;

		inode_sub_bytes(inode, i << uspi->s_fshift);
		fs32_add(sb, &ucg->cg_cs.cs_nffree, i);
		uspi->cs_total.cs_nffree += i;
		fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, i);
		fs32_add(sb, &ucg->cg_frsum[i], 1);
		goto succed;
	}

	/* Claim 'count' fragments out of a free run of size 'allocsize'. */
	result = ufs_bitmap_search (sb, ucpi, goal, allocsize);
	if (result == INVBLOCK)
		return 0;
	if (!try_add_frags(inode, count))
		return 0;
	for (i = 0; i < count; i++)
		ubh_clrbit (UCPI_UBH(ucpi), ucpi->c_freeoff, result + i);

	fs32_sub(sb, &ucg->cg_cs.cs_nffree, count);
	uspi->cs_total.cs_nffree -= count;
	fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
	fs32_sub(sb, &ucg->cg_frsum[allocsize], 1);

	/* The remainder of the run re-registers as a shorter free run. */
	if (count != allocsize)
		fs32_add(sb, &ucg->cg_frsum[allocsize - count], 1);

succed:
	ubh_mark_buffer_dirty (USPI_UBH(uspi));
	ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
	if (sb->s_flags & MS_SYNCHRONOUS)
		ubh_sync_block(UCPI_UBH(ucpi));
	ufs_mark_sb_dirty(sb);

	/* Convert the cg-relative fragment number to a filesystem-wide one. */
	result += cgno * uspi->s_fpg;
	UFSD("EXIT3, result %llu\n", (unsigned long long)result);
	return result;
}
704
/*
 * Allocate one whole block in the given cylinder group, preferably at
 * 'goal' (cg-relative when nonzero).  Returns the cg-relative fragment
 * number of the block, INVBLOCK when no free block exists, or 0 when the
 * inode's block count would overflow.
 */
static u64 ufs_alloccg_block(struct inode *inode,
			     struct ufs_cg_private_info *ucpi,
			     u64 goal, int *err)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_cylinder_group * ucg;
	u64 result, blkno;

	UFSD("ENTER, goal %llu\n", (unsigned long long)goal);

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;
	ucg = ubh_get_ucg(UCPI_UBH(ucpi));

	/* No goal: start the search from the group's rotor. */
	if (goal == 0) {
		goal = ucpi->c_rotor;
		goto norot;
	}
	/* Round the goal down to a block boundary, cg-relative. */
	goal = ufs_blknum (goal);
	goal = ufs_dtogd(uspi, goal);

	/*
	 * If the requested block is available, use it.
	 */
	if (ubh_isblockset(UCPI_UBH(ucpi), ucpi->c_freeoff, ufs_fragstoblks(goal))) {
		result = goal;
		goto gotit;
	}

norot:
	/* Otherwise scan the bitmap for any whole free block. */
	result = ufs_bitmap_search (sb, ucpi, goal, uspi->s_fpb);
	if (result == INVBLOCK)
		return INVBLOCK;
	ucpi->c_rotor = result;
gotit:
	/* NOTE(review): this failure returns 0 while the "no free block"
	 * path returns INVBLOCK — callers must treat both as failure. */
	if (!try_add_frags(inode, uspi->s_fpb))
		return 0;
	blkno = ufs_fragstoblks(result);
	/* Mark the block allocated (clear bit) and fix cluster stats. */
	ubh_clrblock (UCPI_UBH(ucpi), ucpi->c_freeoff, blkno);
	if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
		ufs_clusteracct (sb, ucpi, blkno, -1);

	/* Per-cg, filesystem-total and cached free-block counters. */
	fs32_sub(sb, &ucg->cg_cs.cs_nbfree, 1);
	uspi->cs_total.cs_nbfree--;
	fs32_sub(sb, &UFS_SB(sb)->fs_cs(ucpi->c_cgx).cs_nbfree, 1);

	if (uspi->fs_magic != UFS2_MAGIC) {
		/* UFS1 also keeps per-cylinder rotational-position stats. */
		unsigned cylno = ufs_cbtocylno((unsigned)result);

		fs16_sub(sb, &ubh_cg_blks(ucpi, cylno,
					  ufs_cbtorpos((unsigned)result)), 1);
		fs32_sub(sb, &ubh_cg_blktot(ucpi, cylno), 1);
	}

	UFSD("EXIT, result %llu\n", (unsigned long long)result);

	return result;
}
764
/*
 * Scan 'size' bytes of the buffer chain, starting 'begin' bytes in,
 * for the first byte b with (table[b] & mask) != 0.  Returns the number
 * of bytes from the matching byte (inclusive) to the end of the scan
 * window, or 0 when no byte matched.
 */
static unsigned ubh_scanc(struct ufs_sb_private_info *uspi,
			  struct ufs_buffer_head *ubh,
			  unsigned begin, unsigned size,
			  unsigned char *table, unsigned char mask)
{
	unsigned rest, offset;
	unsigned char *cp;


	/* Split 'begin' into a buffer index and a byte offset within it. */
	offset = begin & ~uspi->s_fmask;
	begin >>= uspi->s_fshift;
	for (;;) {
		/* 'rest' is how much of the window lies in this buffer. */
		if ((offset + size) < uspi->s_fsize)
			rest = size;
		else
			rest = uspi->s_fsize - offset;
		size -= rest;
		cp = ubh->bh[begin]->b_data + offset;
		/* Stop at the first byte whose table entry has 'mask' set;
		 * rest ends up 0 when this buffer has no match. */
		while ((table[*cp++] & mask) == 0 && --rest)
			;
		if (rest || !size)
			break;
		begin++;
		offset = 0;
	}
	return (size + rest);
}
792
793 /*
794  * Find a block of the specified size in the specified cylinder group.
795  * @sp: pointer to super block
796  * @ucpi: pointer to cylinder group info
797  * @goal: near which block we want find new one
798  * @count: specified size
799  */
static u64 ufs_bitmap_search(struct super_block *sb,
			     struct ufs_cg_private_info *ucpi,
			     u64 goal, unsigned count)
{
	/*
	 * Bit patterns for identifying fragments in the block map
	 * used as ((map & mask_arr) == want_arr).
	 * want_arr[count] is `count' one-bits bounded by a zero on
	 * each side, i.e. an exact-fit run of `count' free fragments
	 * (a set bit in the map means the fragment is free).
	 */
	static const int mask_arr[9] = {
		0x3, 0x7, 0xf, 0x1f, 0x3f, 0x7f, 0xff, 0x1ff, 0x3ff
	};
	static const int want_arr[9] = {
		0x0, 0x2, 0x6, 0xe, 0x1e, 0x3e, 0x7e, 0xfe, 0x1fe
	};
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	unsigned start, length, loc;
	unsigned pos, want, blockmap, mask, end;
	u64 result;

	UFSD("ENTER, cg %u, goal %llu, count %u\n", ucpi->c_cgx,
	     (unsigned long long)goal, count);

	/* Byte of the bitmap to start from: near the goal if one was
	 * given, otherwise at the per-cg rotor (last search position). */
	if (goal)
		start = ufs_dtogd(uspi, goal) >> 3;
	else
		start = ucpi->c_frotor >> 3;
		
	/* First pass: scan from `start' to the end of the bitmap. */
	length = ((uspi->s_fpg + 7) >> 3) - start;
	loc = ubh_scanc(uspi, UCPI_UBH(ucpi), ucpi->c_freeoff + start, length,
		(uspi->s_fpb == 8) ? ufs_fragtable_8fpb : ufs_fragtable_other,
		1 << (count - 1 + (uspi->s_fpb & 7))); 
	if (loc == 0) {
		/* Second pass: wrap around and scan from byte 0 up to
		 * and including the start byte. */
		length = start + 1;
		loc = ubh_scanc(uspi, UCPI_UBH(ucpi), ucpi->c_freeoff, length,
				(uspi->s_fpb == 8) ? ufs_fragtable_8fpb :
				ufs_fragtable_other,
				1 << (count - 1 + (uspi->s_fpb & 7)));
		if (loc == 0) {
			/* ubh_scanc guaranteed a hit by the cg summary;
			 * none means the bitmap is corrupted. */
			ufs_error(sb, "ufs_bitmap_search",
				  "bitmap corrupted on cg %u, start %u,"
				  " length %u, count %u, freeoff %u\n",
				  ucpi->c_cgx, start, length, count,
				  ucpi->c_freeoff);
			return INVBLOCK;
		}
		start = 0;
	}
	/* ubh_scanc returned the bytes remaining from the match, so
	 * start + length - loc is the matching byte index; << 3 turns
	 * it into a fragment number. */
	result = (start + length - loc) << 3;
	ucpi->c_frotor = result;

	/*
	 * found the byte in the map
	 */

	/* Locate the exact run inside that byte, one block at a time. */
	for (end = result + 8; result < end; result += uspi->s_fpb) {
		blockmap = ubh_blkmap(UCPI_UBH(ucpi), ucpi->c_freeoff, result);
		/* Shift in a zero below bit 0 so a run starting at the
		 * first fragment is still bounded on both sides. */
		blockmap <<= 1;
		mask = mask_arr[count];
		want = want_arr[count];
		/* Slide the pattern over every position the run fits. */
		for (pos = 0; pos <= uspi->s_fpb - count; pos++) {
			if ((blockmap & mask) == want) {
				UFSD("EXIT, result %llu\n",
				     (unsigned long long)result);
				return result + pos;
			}
			mask <<= 1;
			want <<= 1;
		}
	}

	/* The byte-level scan matched but the bit-level scan did not:
	 * the map and the fragment table disagree. */
	ufs_error(sb, "ufs_bitmap_search", "block not in map on cg %u\n",
		  ucpi->c_cgx);
	UFSD("EXIT (FAILED)\n");
	return INVBLOCK;
}
875
876 static void ufs_clusteracct(struct super_block * sb,
877         struct ufs_cg_private_info * ucpi, unsigned blkno, int cnt)
878 {
879         struct ufs_sb_private_info * uspi;
880         int i, start, end, forw, back;
881         
882         uspi = UFS_SB(sb)->s_uspi;
883         if (uspi->s_contigsumsize <= 0)
884                 return;
885
886         if (cnt > 0)
887                 ubh_setbit(UCPI_UBH(ucpi), ucpi->c_clusteroff, blkno);
888         else
889                 ubh_clrbit(UCPI_UBH(ucpi), ucpi->c_clusteroff, blkno);
890
891         /*
892          * Find the size of the cluster going forward.
893          */
894         start = blkno + 1;
895         end = start + uspi->s_contigsumsize;
896         if ( end >= ucpi->c_nclusterblks)
897                 end = ucpi->c_nclusterblks;
898         i = ubh_find_next_zero_bit (UCPI_UBH(ucpi), ucpi->c_clusteroff, end, start);
899         if (i > end)
900                 i = end;
901         forw = i - start;
902         
903         /*
904          * Find the size of the cluster going backward.
905          */
906         start = blkno - 1;
907         end = start - uspi->s_contigsumsize;
908         if (end < 0 ) 
909                 end = -1;
910         i = ubh_find_last_zero_bit (UCPI_UBH(ucpi), ucpi->c_clusteroff, start, end);
911         if ( i < end) 
912                 i = end;
913         back = start - i;
914         
915         /*
916          * Account for old cluster and the possibly new forward and
917          * back clusters.
918          */
919         i = back + forw + 1;
920         if (i > uspi->s_contigsumsize)
921                 i = uspi->s_contigsumsize;
922         fs32_add(sb, (__fs32*)ubh_get_addr(UCPI_UBH(ucpi), ucpi->c_clustersumoff + (i << 2)), cnt);
923         if (back > 0)
924                 fs32_sub(sb, (__fs32*)ubh_get_addr(UCPI_UBH(ucpi), ucpi->c_clustersumoff + (back << 2)), cnt);
925         if (forw > 0)
926                 fs32_sub(sb, (__fs32*)ubh_get_addr(UCPI_UBH(ucpi), ucpi->c_clustersumoff + (forw << 2)), cnt);
927 }
928
929
/*
 * Free-run descriptor table for filesystems with 8 fragments per
 * block: ufs_fragtable_8fpb[b] has bit (n-1) set, n = 1..8, when the
 * bitmap byte b contains a maximal run of exactly n consecutive free
 * (set) fragments.  E.g. 0x01 -> 0x01 (one run of 1), 0x03 -> 0x02
 * (one run of 2), 0xFF -> 0x80 (one run of 8).  Looked up by
 * ubh_scanc() with mask 1 << (count - 1).
 */
static unsigned char ufs_fragtable_8fpb[] = {
	0x00, 0x01, 0x01, 0x02, 0x01, 0x01, 0x02, 0x04, 0x01, 0x01, 0x01, 0x03, 0x02, 0x03, 0x04, 0x08,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x02, 0x03, 0x03, 0x02, 0x04, 0x05, 0x08, 0x10,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
	0x02, 0x03, 0x03, 0x02, 0x03, 0x03, 0x02, 0x06, 0x04, 0x05, 0x05, 0x06, 0x08, 0x09, 0x10, 0x20,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09, 
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x03, 0x03, 0x03, 0x03, 0x05, 0x05, 0x09, 0x11,
	0x02, 0x03, 0x03, 0x02, 0x03, 0x03, 0x02, 0x06, 0x03, 0x03, 0x03, 0x03, 0x02, 0x03, 0x06, 0x0A,
	0x04, 0x05, 0x05, 0x06, 0x05, 0x05, 0x06, 0x04, 0x08, 0x09, 0x09, 0x0A, 0x10, 0x11, 0x20, 0x40,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x03, 0x03, 0x03, 0x03, 0x05, 0x05, 0x09, 0x11,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
	0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x07, 0x05, 0x05, 0x05, 0x07, 0x09, 0x09, 0x11, 0x21,
	0x02, 0x03, 0x03, 0x02, 0x03, 0x03, 0x02, 0x06, 0x03, 0x03, 0x03, 0x03, 0x02, 0x03, 0x06, 0x0A,
	0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x07, 0x02, 0x03, 0x03, 0x02, 0x06, 0x07, 0x0A, 0x12,
	0x04, 0x05, 0x05, 0x06, 0x05, 0x05, 0x06, 0x04, 0x05, 0x05, 0x05, 0x07, 0x06, 0x07, 0x04, 0x0C,
	0x08, 0x09, 0x09, 0x0A, 0x09, 0x09, 0x0A, 0x0C, 0x10, 0x11, 0x11, 0x12, 0x20, 0x21, 0x40, 0x80,
};
948
/*
 * Free-run descriptor table used when there are fewer than 8
 * fragments per block, so one bitmap byte covers several blocks.
 * The run lengths for each possible fpb value share the byte in
 * separate bit groups offset by fpb; ubh_scanc() selects a group
 * with mask 1 << (count - 1 + fpb).  (E.g. byte 0x01 -> 0x16: bit 1
 * = free block at fpb 1, bit 2 = run of 1 at fpb 2, bit 4 = run of 1
 * at fpb 4 — presumably fpb is always 1, 2 or 4 here; the exact
 * group layout should be confirmed against ufs_fs.h.)
 */
static unsigned char ufs_fragtable_other[] = {
	0x00, 0x16, 0x16, 0x2A, 0x16, 0x16, 0x26, 0x4E, 0x16, 0x16, 0x16, 0x3E, 0x2A, 0x3E, 0x4E, 0x8A,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x2A, 0x3E, 0x3E, 0x2A, 0x3E, 0x3E, 0x2E, 0x6E, 0x3E, 0x3E, 0x3E, 0x3E, 0x2A, 0x3E, 0x6E, 0xAA,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x26, 0x36, 0x36, 0x2E, 0x36, 0x36, 0x26, 0x6E, 0x36, 0x36, 0x36, 0x3E, 0x2E, 0x3E, 0x6E, 0xAE,
	0x4E, 0x5E, 0x5E, 0x6E, 0x5E, 0x5E, 0x6E, 0x4E, 0x5E, 0x5E, 0x5E, 0x7E, 0x6E, 0x7E, 0x4E, 0xCE,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0xBE,
	0x2A, 0x3E, 0x3E, 0x2A, 0x3E, 0x3E, 0x2E, 0x6E, 0x3E, 0x3E, 0x3E, 0x3E, 0x2A, 0x3E, 0x6E, 0xAA,
	0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0xBE,
	0x4E, 0x5E, 0x5E, 0x6E, 0x5E, 0x5E, 0x6E, 0x4E, 0x5E, 0x5E, 0x5E, 0x7E, 0x6E, 0x7E, 0x4E, 0xCE,
	0x8A, 0x9E, 0x9E, 0xAA, 0x9E, 0x9E, 0xAE, 0xCE, 0x9E, 0x9E, 0x9E, 0xBE, 0xAA, 0xBE, 0xCE, 0x8A,
};