/*
 * balloc.c
 *
 * PURPOSE
 *      Block allocation handling routines for the OSTA-UDF(tm) filesystem.
 *
 * COPYRIGHT
 *      This file is distributed under the terms of the GNU General Public
 *      License (GPL). Copies of the GPL can be obtained from:
 *              ftp://prep.ai.mit.edu/pub/gnu/GPL
 *      Each contributing author retains all rights to their own work.
 *
 *  (C) 1999-2001 Ben Fennema
 *  (C) 1999 Stelias Computing Inc
 *
 * HISTORY
 *
 *  02/24/99 blf  Created.
 *
 */

#include "udfdecl.h"

#include <linux/quotaops.h>
#include <linux/buffer_head.h>
#include <linux/bitops.h>

#include "udf_i.h"
#include "udf_sb.h"

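/*
 * These macros map the UDF bitmap helpers onto the generic little-endian
 * (ext2-style) bitops and, via token pasting, build BITS_PER_LONG-sized
 * little-endian conversion helpers (leBPL_to_cpup) and the matching
 * uintBPL_t word type used by the bitmap scanners below.
 */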
#define udf_clear_bit(nr,addr) ext2_clear_bit(nr,addr)
#define udf_set_bit(nr,addr) ext2_set_bit(nr,addr)
#define udf_test_bit(nr, addr) ext2_test_bit(nr, addr)
#define udf_find_first_one_bit(addr, size) find_first_one_bit(addr, size)
#define udf_find_next_one_bit(addr, size, offset) find_next_one_bit(addr, size, offset)

#define leBPL_to_cpup(x) leNUM_to_cpup(BITS_PER_LONG, x)
#define leNUM_to_cpup(x,y) xleNUM_to_cpup(x,y)
#define xleNUM_to_cpup(x,y) (le ## x ## _to_cpup(y))
#define uintBPL_t uint(BITS_PER_LONG)
#define uint(x) xuint(x)
#define xuint(x) __le ## x

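/*
 * Scan a little-endian bitmap of 'size' bits for the next set bit at or
 * after 'offset'.  Returns the bit number of that bit, or a value >= size
 * if no set bit is found.
 */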
static inline int find_next_one_bit(void *addr, int size, int offset)
{
        uintBPL_t *p = ((uintBPL_t *) addr) + (offset / BITS_PER_LONG);
        int result = offset & ~(BITS_PER_LONG - 1);
        unsigned long tmp;

        if (offset >= size)
                return size;
        size -= result;
        offset &= (BITS_PER_LONG - 1);
        if (offset) {
                tmp = leBPL_to_cpup(p++);
                tmp &= ~0UL << offset;
                if (size < BITS_PER_LONG)
                        goto found_first;
                if (tmp)
                        goto found_middle;
                size -= BITS_PER_LONG;
                result += BITS_PER_LONG;
        }
        while (size & ~(BITS_PER_LONG - 1)) {
                if ((tmp = leBPL_to_cpup(p++)))
                        goto found_middle;
                result += BITS_PER_LONG;
                size -= BITS_PER_LONG;
        }
        if (!size)
                return result;
        tmp = leBPL_to_cpup(p);
found_first:
        tmp &= ~0UL >> (BITS_PER_LONG - size);
found_middle:
        return result + ffz(~tmp);
}

#define find_first_one_bit(addr, size)\
        find_next_one_bit((addr), (size), 0)

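/*
 * Read the on-disk space bitmap block 'block' and cache its buffer_head in
 * bitmap->s_block_bitmap[bitmap_nr].  Returns 0 on success or -EIO if the
 * block could not be read.
 */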
static int read_block_bitmap(struct super_block *sb,
                             struct udf_bitmap *bitmap, unsigned int block,
                             unsigned long bitmap_nr)
{
        struct buffer_head *bh = NULL;
        int retval = 0;
        kernel_lb_addr loc;

        loc.logicalBlockNum = bitmap->s_extPosition;
        loc.partitionReferenceNum = UDF_SB(sb)->s_partition;

        bh = udf_tread(sb, udf_get_lb_pblock(sb, loc, block));
        if (!bh) {
                retval = -EIO;
        }
        bitmap->s_block_bitmap[bitmap_nr] = bh;
        return retval;
}

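/*
 * Make sure the bitmap block for 'block_group' is cached, reading it from
 * disk if necessary.  Returns the slot number (== block_group) or a
 * negative error code.
 */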
static int __load_block_bitmap(struct super_block *sb,
                               struct udf_bitmap *bitmap,
                               unsigned int block_group)
{
        int retval = 0;
        int nr_groups = bitmap->s_nr_groups;

        if (block_group >= nr_groups) {
                udf_debug("block_group (%d) >= nr_groups (%d)\n", block_group,
                          nr_groups);
        }

        if (bitmap->s_block_bitmap[block_group]) {
                return block_group;
        } else {
                retval = read_block_bitmap(sb, bitmap, block_group,
                                           block_group);
                if (retval < 0)
                        return retval;
                return block_group;
        }
}

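/*
 * Wrapper around __load_block_bitmap() that also fails with -EIO when the
 * cached buffer for the group is missing.
 */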
static inline int load_block_bitmap(struct super_block *sb,
                                    struct udf_bitmap *bitmap,
                                    unsigned int block_group)
{
        int slot;

        slot = __load_block_bitmap(sb, bitmap, block_group);

        if (slot < 0)
                return slot;

        if (!bitmap->s_block_bitmap[slot])
                return -EIO;

        return slot;
}

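/*
 * Free 'count' blocks starting at bloc + offset by setting the matching
 * bits in the partition's space bitmap, crossing bitmap group boundaries
 * as needed, and update the quota and LVID free-space accounting.
 */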
static void udf_bitmap_free_blocks(struct super_block *sb,
                                   struct inode *inode,
                                   struct udf_bitmap *bitmap,
                                   kernel_lb_addr bloc, uint32_t offset,
                                   uint32_t count)
{
        struct udf_sb_info *sbi = UDF_SB(sb);
        struct buffer_head *bh = NULL;
        unsigned long block;
        unsigned long block_group;
        unsigned long bit;
        unsigned long i;
        int bitmap_nr;
        unsigned long overflow;

        mutex_lock(&sbi->s_alloc_mutex);
        if (bloc.logicalBlockNum < 0 ||
            (bloc.logicalBlockNum + count) > sbi->s_partmaps[bloc.partitionReferenceNum].s_partition_len) {
                udf_debug("%d < %d || %d + %d > %d\n",
                          bloc.logicalBlockNum, 0, bloc.logicalBlockNum, count,
                          sbi->s_partmaps[bloc.partitionReferenceNum].s_partition_len);
                goto error_return;
        }

        block = bloc.logicalBlockNum + offset + (sizeof(struct spaceBitmapDesc) << 3);

do_more:
        overflow = 0;
        block_group = block >> (sb->s_blocksize_bits + 3);
        bit = block % (sb->s_blocksize << 3);

        /*
         * Check to see if we are freeing blocks across a group boundary.
         */
        if (bit + count > (sb->s_blocksize << 3)) {
                overflow = bit + count - (sb->s_blocksize << 3);
                count -= overflow;
        }
        bitmap_nr = load_block_bitmap(sb, bitmap, block_group);
        if (bitmap_nr < 0)
                goto error_return;

        bh = bitmap->s_block_bitmap[bitmap_nr];
        for (i = 0; i < count; i++) {
                if (udf_set_bit(bit + i, bh->b_data)) {
                        udf_debug("bit %ld already set\n", bit + i);
                        udf_debug("byte=%2x\n", ((char *)bh->b_data)[(bit + i) >> 3]);
                } else {
                        if (inode)
                                DQUOT_FREE_BLOCK(inode, 1);
                        if (sbi->s_lvid_bh) {
                                struct logicalVolIntegrityDesc *lvid = (struct logicalVolIntegrityDesc *)sbi->s_lvid_bh->b_data;
                                lvid->freeSpaceTable[sbi->s_partition] =
                                        cpu_to_le32(le32_to_cpu(lvid->freeSpaceTable[sbi->s_partition]) + 1);
                        }
                }
        }
        mark_buffer_dirty(bh);
        if (overflow) {
                block += count;
                count = overflow;
                goto do_more;
        }
error_return:
        sb->s_dirt = 1;
        if (sbi->s_lvid_bh)
                mark_buffer_dirty(sbi->s_lvid_bh);
        mutex_unlock(&sbi->s_alloc_mutex);
        return;
}

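/*
 * Try to grab up to 'block_count' contiguous blocks starting at
 * 'first_block' by clearing the corresponding bitmap bits.  Stops at the
 * first block that is already in use and returns the number of blocks
 * actually allocated.
 */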
static int udf_bitmap_prealloc_blocks(struct super_block *sb,
                                      struct inode *inode,
                                      struct udf_bitmap *bitmap,
                                      uint16_t partition, uint32_t first_block,
                                      uint32_t block_count)
{
        struct udf_sb_info *sbi = UDF_SB(sb);
        int alloc_count = 0;
        int bit, block, block_group, group_start;
        int nr_groups, bitmap_nr;
        struct buffer_head *bh;
        __u32 part_len;

        mutex_lock(&sbi->s_alloc_mutex);
        part_len = sbi->s_partmaps[partition].s_partition_len;
        if (first_block < 0 || first_block >= part_len)
                goto out;

        if (first_block + block_count > part_len)
                block_count = part_len - first_block;

repeat:
        nr_groups = (sbi->s_partmaps[partition].s_partition_len +
                     (sizeof(struct spaceBitmapDesc) << 3) +
                     (sb->s_blocksize * 8) - 1) / (sb->s_blocksize * 8);
        block = first_block + (sizeof(struct spaceBitmapDesc) << 3);
        block_group = block >> (sb->s_blocksize_bits + 3);
        group_start = block_group ? 0 : sizeof(struct spaceBitmapDesc);

        bitmap_nr = load_block_bitmap(sb, bitmap, block_group);
        if (bitmap_nr < 0)
                goto out;
        bh = bitmap->s_block_bitmap[bitmap_nr];

        bit = block % (sb->s_blocksize << 3);

        while (bit < (sb->s_blocksize << 3) && block_count > 0) {
                if (!udf_test_bit(bit, bh->b_data)) {
                        goto out;
                } else if (DQUOT_PREALLOC_BLOCK(inode, 1)) {
                        goto out;
                } else if (!udf_clear_bit(bit, bh->b_data)) {
                        udf_debug("bit already cleared for block %d\n", bit);
                        DQUOT_FREE_BLOCK(inode, 1);
                        goto out;
                }
                block_count--;
                alloc_count++;
                bit++;
                block++;
        }
        mark_buffer_dirty(bh);
        if (block_count > 0)
                goto repeat;
out:
        if (sbi->s_lvid_bh) {
                struct logicalVolIntegrityDesc *lvid = (struct logicalVolIntegrityDesc *)sbi->s_lvid_bh->b_data;
                lvid->freeSpaceTable[partition] =
                        cpu_to_le32(le32_to_cpu(lvid->freeSpaceTable[partition]) - alloc_count);
                mark_buffer_dirty(sbi->s_lvid_bh);
        }
        sb->s_dirt = 1;
        mutex_unlock(&sbi->s_alloc_mutex);
        return alloc_count;
}

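/*
 * Allocate a single block, preferring 'goal'.  The goal's bitmap group is
 * searched first, then the remaining groups; once a free bit is found the
 * search walks back up to 7 bits to the start of the free run.
 */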
static int udf_bitmap_new_block(struct super_block *sb,
                                struct inode *inode,
                                struct udf_bitmap *bitmap, uint16_t partition,
                                uint32_t goal, int *err)
{
        struct udf_sb_info *sbi = UDF_SB(sb);
        int newbit, bit = 0, block, block_group, group_start;
        int end_goal, nr_groups, bitmap_nr, i;
        struct buffer_head *bh = NULL;
        char *ptr;
        int newblock = 0;

        *err = -ENOSPC;
        mutex_lock(&sbi->s_alloc_mutex);

repeat:
        if (goal < 0 || goal >= sbi->s_partmaps[partition].s_partition_len)
                goal = 0;

        nr_groups = bitmap->s_nr_groups;
        block = goal + (sizeof(struct spaceBitmapDesc) << 3);
        block_group = block >> (sb->s_blocksize_bits + 3);
        group_start = block_group ? 0 : sizeof(struct spaceBitmapDesc);

        bitmap_nr = load_block_bitmap(sb, bitmap, block_group);
        if (bitmap_nr < 0)
                goto error_return;
        bh = bitmap->s_block_bitmap[bitmap_nr];
        ptr = memscan((char *)bh->b_data + group_start, 0xFF,
                      sb->s_blocksize - group_start);

        if ((ptr - ((char *)bh->b_data)) < sb->s_blocksize) {
                bit = block % (sb->s_blocksize << 3);
                if (udf_test_bit(bit, bh->b_data))
                        goto got_block;

                end_goal = (bit + 63) & ~63;
                bit = udf_find_next_one_bit(bh->b_data, end_goal, bit);
                if (bit < end_goal)
                        goto got_block;

                ptr = memscan((char *)bh->b_data + (bit >> 3), 0xFF, sb->s_blocksize - ((bit + 7) >> 3));
                newbit = (ptr - ((char *)bh->b_data)) << 3;
                if (newbit < sb->s_blocksize << 3) {
                        bit = newbit;
                        goto search_back;
                }

                newbit = udf_find_next_one_bit(bh->b_data, sb->s_blocksize << 3, bit);
                if (newbit < sb->s_blocksize << 3) {
                        bit = newbit;
                        goto got_block;
                }
        }

        for (i = 0; i < (nr_groups * 2); i++) {
                block_group++;
                if (block_group >= nr_groups)
                        block_group = 0;
                group_start = block_group ? 0 : sizeof(struct spaceBitmapDesc);

                bitmap_nr = load_block_bitmap(sb, bitmap, block_group);
                if (bitmap_nr < 0)
                        goto error_return;
                bh = bitmap->s_block_bitmap[bitmap_nr];
                if (i < nr_groups) {
                        ptr = memscan((char *)bh->b_data + group_start, 0xFF,
                                      sb->s_blocksize - group_start);
                        if ((ptr - ((char *)bh->b_data)) < sb->s_blocksize) {
                                bit = (ptr - ((char *)bh->b_data)) << 3;
                                break;
                        }
                } else {
                        bit = udf_find_next_one_bit((char *)bh->b_data,
                                                    sb->s_blocksize << 3,
                                                    group_start << 3);
                        if (bit < sb->s_blocksize << 3)
                                break;
                }
        }
        if (i >= (nr_groups * 2)) {
                mutex_unlock(&sbi->s_alloc_mutex);
                return newblock;
        }
        if (bit < sb->s_blocksize << 3)
                goto search_back;
        else
                bit = udf_find_next_one_bit(bh->b_data, sb->s_blocksize << 3, group_start << 3);
        if (bit >= sb->s_blocksize << 3) {
                mutex_unlock(&sbi->s_alloc_mutex);
                return 0;
        }

search_back:
        for (i = 0; i < 7 && bit > (group_start << 3) && udf_test_bit(bit - 1, bh->b_data); i++, bit--)
                ; /* empty loop */

got_block:

        /*
         * Check quota for allocation of this block.
         */
        if (inode && DQUOT_ALLOC_BLOCK(inode, 1)) {
                mutex_unlock(&sbi->s_alloc_mutex);
                *err = -EDQUOT;
                return 0;
        }

        newblock = bit + (block_group << (sb->s_blocksize_bits + 3)) -
                (sizeof(struct spaceBitmapDesc) << 3);

        if (!udf_clear_bit(bit, bh->b_data)) {
                udf_debug("bit already cleared for block %d\n", bit);
                goto repeat;
        }

        mark_buffer_dirty(bh);

        if (sbi->s_lvid_bh) {
                struct logicalVolIntegrityDesc *lvid = (struct logicalVolIntegrityDesc *)sbi->s_lvid_bh->b_data;
                lvid->freeSpaceTable[partition] =
                        cpu_to_le32(le32_to_cpu(lvid->freeSpaceTable[partition]) - 1);
                mark_buffer_dirty(sbi->s_lvid_bh);
        }
        sb->s_dirt = 1;
        mutex_unlock(&sbi->s_alloc_mutex);
        *err = 0;
        return newblock;

error_return:
        *err = -EIO;
        mutex_unlock(&sbi->s_alloc_mutex);
        return 0;
}

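/*
 * Return 'count' blocks starting at bloc + offset to an unallocated-space
 * table.  The freed range is merged with an adjacent free extent when
 * possible; otherwise a new extent is appended, stealing a block from the
 * freed range itself if a new allocation extent block is required.
 */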
static void udf_table_free_blocks(struct super_block *sb,
                                  struct inode *inode,
                                  struct inode *table,
                                  kernel_lb_addr bloc, uint32_t offset,
                                  uint32_t count)
{
        struct udf_sb_info *sbi = UDF_SB(sb);
        uint32_t start, end;
        uint32_t elen;
        kernel_lb_addr eloc;
        struct extent_position oepos, epos;
        int8_t etype;
        int i;

        mutex_lock(&sbi->s_alloc_mutex);
        if (bloc.logicalBlockNum < 0 ||
            (bloc.logicalBlockNum + count) > sbi->s_partmaps[bloc.partitionReferenceNum].s_partition_len) {
                udf_debug("%d < %d || %d + %d > %d\n",
                          bloc.logicalBlockNum, 0, bloc.logicalBlockNum, count,
                          sbi->s_partmaps[bloc.partitionReferenceNum].s_partition_len);
                goto error_return;
        }

        /* We do this up front - there are some error conditions that could
           occur, but... oh well */
        if (inode)
                DQUOT_FREE_BLOCK(inode, count);
        if (sbi->s_lvid_bh) {
                struct logicalVolIntegrityDesc *lvid = (struct logicalVolIntegrityDesc *)sbi->s_lvid_bh->b_data;
                lvid->freeSpaceTable[sbi->s_partition] =
                        cpu_to_le32(le32_to_cpu(lvid->freeSpaceTable[sbi->s_partition]) + count);
                mark_buffer_dirty(sbi->s_lvid_bh);
        }

        start = bloc.logicalBlockNum + offset;
        end = bloc.logicalBlockNum + offset + count - 1;

        epos.offset = oepos.offset = sizeof(struct unallocSpaceEntry);
        elen = 0;
        epos.block = oepos.block = UDF_I_LOCATION(table);
        epos.bh = oepos.bh = NULL;

        while (count &&
               (etype = udf_next_aext(table, &epos, &eloc, &elen, 1)) != -1) {
                if (((eloc.logicalBlockNum + (elen >> sb->s_blocksize_bits)) == start)) {
                        if ((0x3FFFFFFF - elen) < (count << sb->s_blocksize_bits)) {
                                count -= ((0x3FFFFFFF - elen) >> sb->s_blocksize_bits);
                                start += ((0x3FFFFFFF - elen) >> sb->s_blocksize_bits);
                                elen = (etype << 30) | (0x40000000 - sb->s_blocksize);
                        } else {
                                elen = (etype << 30) | (elen + (count << sb->s_blocksize_bits));
                                start += count;
                                count = 0;
                        }
                        udf_write_aext(table, &oepos, eloc, elen, 1);
                } else if (eloc.logicalBlockNum == (end + 1)) {
                        if ((0x3FFFFFFF - elen) < (count << sb->s_blocksize_bits)) {
                                count -= ((0x3FFFFFFF - elen) >> sb->s_blocksize_bits);
                                end -= ((0x3FFFFFFF - elen) >> sb->s_blocksize_bits);
                                eloc.logicalBlockNum -= ((0x3FFFFFFF - elen) >> sb->s_blocksize_bits);
                                elen = (etype << 30) | (0x40000000 - sb->s_blocksize);
                        } else {
                                eloc.logicalBlockNum = start;
                                elen = (etype << 30) | (elen + (count << sb->s_blocksize_bits));
                                end -= count;
                                count = 0;
                        }
                        udf_write_aext(table, &oepos, eloc, elen, 1);
                }

                if (epos.bh != oepos.bh) {
                        i = -1;
                        oepos.block = epos.block;
                        brelse(oepos.bh);
                        get_bh(epos.bh);
                        oepos.bh = epos.bh;
                        oepos.offset = 0;
                } else {
                        oepos.offset = epos.offset;
                }
        }

        if (count) {
                /*
                 * NOTE: we CANNOT use udf_add_aext here, as it can try to allocate
                 * a new block, and since we hold the super block lock already
                 * very bad things would happen :)
                 *
                 * We copy the behavior of udf_add_aext, but instead of
                 * trying to allocate a new block close to the existing one,
                 * we just steal a block from the extent we are trying to add.
                 *
                 * It would be nice if the blocks were close together, but it
                 * isn't required.
                 */

                int adsize;
                short_ad *sad = NULL;
                long_ad *lad = NULL;
                struct allocExtDesc *aed;

                eloc.logicalBlockNum = start;
                elen = EXT_RECORDED_ALLOCATED |
                        (count << sb->s_blocksize_bits);

                if (UDF_I_ALLOCTYPE(table) == ICBTAG_FLAG_AD_SHORT) {
                        adsize = sizeof(short_ad);
                } else if (UDF_I_ALLOCTYPE(table) == ICBTAG_FLAG_AD_LONG) {
                        adsize = sizeof(long_ad);
                } else {
                        brelse(oepos.bh);
                        brelse(epos.bh);
                        goto error_return;
                }

                if (epos.offset + (2 * adsize) > sb->s_blocksize) {
                        char *sptr, *dptr;
                        int loffset;

                        brelse(oepos.bh);
                        oepos = epos;

                        /* Steal a block from the extent being freed */
                        epos.block.logicalBlockNum = eloc.logicalBlockNum;
                        eloc.logicalBlockNum++;
                        elen -= sb->s_blocksize;

                        if (!(epos.bh = udf_tread(sb, udf_get_lb_pblock(sb, epos.block, 0)))) {
                                brelse(oepos.bh);
                                goto error_return;
                        }
                        aed = (struct allocExtDesc *)(epos.bh->b_data);
                        aed->previousAllocExtLocation = cpu_to_le32(oepos.block.logicalBlockNum);
                        if (epos.offset + adsize > sb->s_blocksize) {
                                loffset = epos.offset;
                                aed->lengthAllocDescs = cpu_to_le32(adsize);
                                sptr = UDF_I_DATA(table) + epos.offset - adsize;
                                dptr = epos.bh->b_data + sizeof(struct allocExtDesc);
                                memcpy(dptr, sptr, adsize);
                                epos.offset = sizeof(struct allocExtDesc) + adsize;
                        } else {
                                loffset = epos.offset + adsize;
                                aed->lengthAllocDescs = cpu_to_le32(0);
                                if (oepos.bh) {
                                        sptr = oepos.bh->b_data + epos.offset;
                                        aed = (struct allocExtDesc *)oepos.bh->b_data;
                                        aed->lengthAllocDescs =
                                                cpu_to_le32(le32_to_cpu(aed->lengthAllocDescs) + adsize);
                                } else {
                                        sptr = UDF_I_DATA(table) + epos.offset;
                                        UDF_I_LENALLOC(table) += adsize;
                                        mark_inode_dirty(table);
                                }
                                epos.offset = sizeof(struct allocExtDesc);
                        }
                        if (sbi->s_udfrev >= 0x0200)
                                udf_new_tag(epos.bh->b_data, TAG_IDENT_AED, 3, 1,
                                            epos.block.logicalBlockNum, sizeof(tag));
                        else
                                udf_new_tag(epos.bh->b_data, TAG_IDENT_AED, 2, 1,
                                            epos.block.logicalBlockNum, sizeof(tag));

                        switch (UDF_I_ALLOCTYPE(table)) {
                                case ICBTAG_FLAG_AD_SHORT:
                                        sad = (short_ad *)sptr;
                                        sad->extLength = cpu_to_le32(
                                                EXT_NEXT_EXTENT_ALLOCDECS |
                                                sb->s_blocksize);
                                        sad->extPosition = cpu_to_le32(epos.block.logicalBlockNum);
                                        break;
                                case ICBTAG_FLAG_AD_LONG:
                                        lad = (long_ad *)sptr;
                                        lad->extLength = cpu_to_le32(
                                                EXT_NEXT_EXTENT_ALLOCDECS |
                                                sb->s_blocksize);
                                        lad->extLocation = cpu_to_lelb(epos.block);
                                        break;
                        }
                        if (oepos.bh) {
                                udf_update_tag(oepos.bh->b_data, loffset);
                                mark_buffer_dirty(oepos.bh);
                        } else {
                                mark_inode_dirty(table);
                        }
                }

                if (elen) { /* It's possible that stealing the block emptied the extent */
                        udf_write_aext(table, &epos, eloc, elen, 1);

                        if (!epos.bh) {
                                UDF_I_LENALLOC(table) += adsize;
                                mark_inode_dirty(table);
                        } else {
                                aed = (struct allocExtDesc *)epos.bh->b_data;
                                aed->lengthAllocDescs =
                                        cpu_to_le32(le32_to_cpu(aed->lengthAllocDescs) + adsize);
                                udf_update_tag(epos.bh->b_data, epos.offset);
                                mark_buffer_dirty(epos.bh);
                        }
                }
        }

        brelse(epos.bh);
        brelse(oepos.bh);

error_return:
        sb->s_dirt = 1;
        mutex_unlock(&sbi->s_alloc_mutex);
        return;
}

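/*
 * Preallocate up to 'block_count' blocks from the free-space table: find
 * the extent that starts at 'first_block' and carve the blocks off its
 * front.  Returns the number of blocks allocated.
 */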
static int udf_table_prealloc_blocks(struct super_block *sb,
                                     struct inode *inode,
                                     struct inode *table, uint16_t partition,
                                     uint32_t first_block, uint32_t block_count)
{
        struct udf_sb_info *sbi = UDF_SB(sb);
        int alloc_count = 0;
        uint32_t elen, adsize;
        kernel_lb_addr eloc;
        struct extent_position epos;
        int8_t etype = -1;

        if (first_block < 0 || first_block >= sbi->s_partmaps[partition].s_partition_len)
                return 0;

        if (UDF_I_ALLOCTYPE(table) == ICBTAG_FLAG_AD_SHORT)
                adsize = sizeof(short_ad);
        else if (UDF_I_ALLOCTYPE(table) == ICBTAG_FLAG_AD_LONG)
                adsize = sizeof(long_ad);
        else
                return 0;

        mutex_lock(&sbi->s_alloc_mutex);
        epos.offset = sizeof(struct unallocSpaceEntry);
        epos.block = UDF_I_LOCATION(table);
        epos.bh = NULL;
        eloc.logicalBlockNum = 0xFFFFFFFF;

        while (first_block != eloc.logicalBlockNum &&
               (etype = udf_next_aext(table, &epos, &eloc, &elen, 1)) != -1) {
                udf_debug("eloc=%d, elen=%d, first_block=%d\n",
                          eloc.logicalBlockNum, elen, first_block);
        }

        if (first_block == eloc.logicalBlockNum) {
                epos.offset -= adsize;

                alloc_count = (elen >> sb->s_blocksize_bits);
                if (inode && DQUOT_PREALLOC_BLOCK(inode, alloc_count > block_count ? block_count : alloc_count)) {
                        alloc_count = 0;
                } else if (alloc_count > block_count) {
                        alloc_count = block_count;
                        eloc.logicalBlockNum += alloc_count;
                        elen -= (alloc_count << sb->s_blocksize_bits);
                        udf_write_aext(table, &epos, eloc, (etype << 30) | elen, 1);
                } else {
                        udf_delete_aext(table, epos, eloc, (etype << 30) | elen);
                }
        } else {
                alloc_count = 0;
        }

        brelse(epos.bh);

        if (alloc_count && sbi->s_lvid_bh) {
                struct logicalVolIntegrityDesc *lvid = (struct logicalVolIntegrityDesc *)sbi->s_lvid_bh->b_data;
                lvid->freeSpaceTable[partition] =
                        cpu_to_le32(le32_to_cpu(lvid->freeSpaceTable[partition]) - alloc_count);
                mark_buffer_dirty(sbi->s_lvid_bh);
                sb->s_dirt = 1;
        }
        mutex_unlock(&sbi->s_alloc_mutex);
        return alloc_count;
}

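/*
 * Allocate a single block from the free-space table.  The extent whose
 * start lies closest to 'goal' is chosen and the block is taken from its
 * beginning, so extents only shrink or disappear and never need to be
 * split.
 */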
static int udf_table_new_block(struct super_block *sb,
                               struct inode *inode,
                               struct inode *table, uint16_t partition,
                               uint32_t goal, int *err)
{
        struct udf_sb_info *sbi = UDF_SB(sb);
        uint32_t spread = 0xFFFFFFFF, nspread = 0xFFFFFFFF;
        uint32_t newblock = 0, adsize;
        uint32_t elen, goal_elen = 0;
        kernel_lb_addr eloc, uninitialized_var(goal_eloc);
        struct extent_position epos, goal_epos;
        int8_t etype;

        *err = -ENOSPC;

        if (UDF_I_ALLOCTYPE(table) == ICBTAG_FLAG_AD_SHORT)
                adsize = sizeof(short_ad);
        else if (UDF_I_ALLOCTYPE(table) == ICBTAG_FLAG_AD_LONG)
                adsize = sizeof(long_ad);
        else
                return newblock;

        mutex_lock(&sbi->s_alloc_mutex);
        if (goal < 0 || goal >= sbi->s_partmaps[partition].s_partition_len)
                goal = 0;

        /* We search for the closest matching block to goal. If we find an exact hit,
           we stop. Otherwise we keep going till we run out of extents.
           We store the buffer_head, bloc, and extoffset of the current closest
           match and use that when we are done.
         */
        epos.offset = sizeof(struct unallocSpaceEntry);
        epos.block = UDF_I_LOCATION(table);
        epos.bh = goal_epos.bh = NULL;

        while (spread &&
               (etype = udf_next_aext(table, &epos, &eloc, &elen, 1)) != -1) {
                if (goal >= eloc.logicalBlockNum) {
                        if (goal < eloc.logicalBlockNum + (elen >> sb->s_blocksize_bits))
                                nspread = 0;
                        else
                                nspread = goal - eloc.logicalBlockNum -
                                        (elen >> sb->s_blocksize_bits);
                } else {
                        nspread = eloc.logicalBlockNum - goal;
                }

                if (nspread < spread) {
                        spread = nspread;
                        if (goal_epos.bh != epos.bh) {
                                brelse(goal_epos.bh);
                                goal_epos.bh = epos.bh;
                                get_bh(goal_epos.bh);
                        }
                        goal_epos.block = epos.block;
                        goal_epos.offset = epos.offset - adsize;
                        goal_eloc = eloc;
                        goal_elen = (etype << 30) | elen;
                }
        }

        brelse(epos.bh);

        if (spread == 0xFFFFFFFF) {
                brelse(goal_epos.bh);
                mutex_unlock(&sbi->s_alloc_mutex);
                return 0;
        }

        /* Only allocate blocks from the beginning of the extent.
           That way, we only delete (empty) extents, never have to insert an
           extent because of splitting */
        /* This works, but very poorly.... */

        newblock = goal_eloc.logicalBlockNum;
        goal_eloc.logicalBlockNum++;
        goal_elen -= sb->s_blocksize;

        if (inode && DQUOT_ALLOC_BLOCK(inode, 1)) {
                brelse(goal_epos.bh);
                mutex_unlock(&sbi->s_alloc_mutex);
                *err = -EDQUOT;
                return 0;
        }

        if (goal_elen)
                udf_write_aext(table, &goal_epos, goal_eloc, goal_elen, 1);
        else
                udf_delete_aext(table, goal_epos, goal_eloc, goal_elen);
        brelse(goal_epos.bh);

        if (sbi->s_lvid_bh) {
                struct logicalVolIntegrityDesc *lvid = (struct logicalVolIntegrityDesc *)sbi->s_lvid_bh->b_data;
                lvid->freeSpaceTable[partition] =
                        cpu_to_le32(le32_to_cpu(lvid->freeSpaceTable[partition]) - 1);
                mark_buffer_dirty(sbi->s_lvid_bh);
        }

        sb->s_dirt = 1;
        mutex_unlock(&sbi->s_alloc_mutex);
        *err = 0;
        return newblock;
}

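/*
 * Free blocks on a partition, dispatching to the bitmap or table variant
 * according to the partition's free-space flags.
 */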
inline void udf_free_blocks(struct super_block *sb,
                            struct inode *inode,
                            kernel_lb_addr bloc, uint32_t offset,
                            uint32_t count)
{
        uint16_t partition = bloc.partitionReferenceNum;
        struct udf_part_map *map = &UDF_SB(sb)->s_partmaps[partition];

        if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP) {
                return udf_bitmap_free_blocks(sb, inode,
                                              map->s_uspace.s_bitmap,
                                              bloc, offset, count);
        } else if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_TABLE) {
                return udf_table_free_blocks(sb, inode,
                                             map->s_uspace.s_table,
                                             bloc, offset, count);
        } else if (map->s_partition_flags & UDF_PART_FLAG_FREED_BITMAP) {
                return udf_bitmap_free_blocks(sb, inode,
                                              map->s_fspace.s_bitmap,
                                              bloc, offset, count);
        } else if (map->s_partition_flags & UDF_PART_FLAG_FREED_TABLE) {
                return udf_table_free_blocks(sb, inode,
                                             map->s_fspace.s_table,
                                             bloc, offset, count);
        } else {
                return;
        }
}

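/*
 * Preallocate blocks on a partition, dispatching to the bitmap or table
 * variant according to the partition's free-space flags.  Returns the
 * number of blocks allocated.
 */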
inline int udf_prealloc_blocks(struct super_block *sb,
                               struct inode *inode,
                               uint16_t partition, uint32_t first_block,
                               uint32_t block_count)
{
        struct udf_part_map *map = &UDF_SB(sb)->s_partmaps[partition];

        if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP) {
                return udf_bitmap_prealloc_blocks(sb, inode,
                                                  map->s_uspace.s_bitmap,
                                                  partition, first_block, block_count);
        } else if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_TABLE) {
                return udf_table_prealloc_blocks(sb, inode,
                                                 map->s_uspace.s_table,
                                                 partition, first_block, block_count);
        } else if (map->s_partition_flags & UDF_PART_FLAG_FREED_BITMAP) {
                return udf_bitmap_prealloc_blocks(sb, inode,
                                                  map->s_fspace.s_bitmap,
                                                  partition, first_block, block_count);
        } else if (map->s_partition_flags & UDF_PART_FLAG_FREED_TABLE) {
                return udf_table_prealloc_blocks(sb, inode,
                                                 map->s_fspace.s_table,
                                                 partition, first_block, block_count);
        } else {
                return 0;
        }
}

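/*
 * Allocate a single block on a partition, dispatching to the bitmap or
 * table variant according to the partition's free-space flags.  Returns
 * the new block number, or 0 with *err set on failure.
 */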
inline int udf_new_block(struct super_block *sb,
                         struct inode *inode,
                         uint16_t partition, uint32_t goal, int *err)
{
        int ret;
        struct udf_part_map *map = &UDF_SB(sb)->s_partmaps[partition];

        if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP) {
                ret = udf_bitmap_new_block(sb, inode,
                                           map->s_uspace.s_bitmap,
                                           partition, goal, err);
                return ret;
        } else if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_TABLE) {
                return udf_table_new_block(sb, inode,
                                           map->s_uspace.s_table,
                                           partition, goal, err);
        } else if (map->s_partition_flags & UDF_PART_FLAG_FREED_BITMAP) {
                return udf_bitmap_new_block(sb, inode,
                                            map->s_fspace.s_bitmap,
                                            partition, goal, err);
        } else if (map->s_partition_flags & UDF_PART_FLAG_FREED_TABLE) {
                return udf_table_new_block(sb, inode,
                                           map->s_fspace.s_table,
                                           partition, goal, err);
        } else {
                *err = -EIO;
                return 0;
        }
}