/*
 * Block allocation handling routines for the OSTA-UDF(tm) filesystem.
 *
 * This file is distributed under the terms of the GNU General Public
 * License (GPL). Copies of the GPL can be obtained from:
 *	ftp://prep.ai.mit.edu/pub/gnu/GPL
 * Each contributing author retains all rights to their own work.
 *
 * (C) 1999-2001 Ben Fennema
 * (C) 1999 Stelias Computing Inc
 *
 * HISTORY
 *
 * 02/24/99 blf  Created.
 */
#include <linux/quotaops.h>
#include <linux/buffer_head.h>
#include <linux/bitops.h>
#define udf_clear_bit(nr, addr) ext2_clear_bit(nr, addr)
#define udf_set_bit(nr, addr) ext2_set_bit(nr, addr)
#define udf_test_bit(nr, addr) ext2_test_bit(nr, addr)
#define udf_find_first_one_bit(addr, size) find_first_one_bit(addr, size)
#define udf_find_next_one_bit(addr, size, offset) \
	find_next_one_bit(addr, size, offset)
#define leBPL_to_cpup(x) leNUM_to_cpup(BITS_PER_LONG, x)
#define leNUM_to_cpup(x, y) xleNUM_to_cpup(x, y)
#define xleNUM_to_cpup(x, y) (le ## x ## _to_cpup(y))
#define uintBPL_t uint(BITS_PER_LONG)
#define uint(x) xuint(x)
#define xuint(x) __le ## x
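/*
 * Illustration (commentary, not part of the original source): the two-level
 * indirection above exists so that BITS_PER_LONG is macro-expanded before
 * token pasting. On a 64-bit build the chains resolve as:
 *
 *	uintBPL_t        -> uint(BITS_PER_LONG) -> xuint(64) -> __le64
 *	leBPL_to_cpup(p) -> leNUM_to_cpup(BITS_PER_LONG, p)
 *	                 -> xleNUM_to_cpup(64, p) -> le64_to_cpup(p)
 *
 * i.e. the bitmap is scanned one native word at a time while staying
 * explicit about the on-disk little-endian layout.
 */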
static inline int find_next_one_bit(void *addr, int size, int offset)
	uintBPL_t *p = ((uintBPL_t *) addr) + (offset / BITS_PER_LONG);
	int result = offset & ~(BITS_PER_LONG - 1);

	offset &= (BITS_PER_LONG - 1);
		tmp = leBPL_to_cpup(p++);
		tmp &= ~0UL << offset;
		if (size < BITS_PER_LONG)
		size -= BITS_PER_LONG;
		result += BITS_PER_LONG;
	while (size & ~(BITS_PER_LONG - 1)) {
		tmp = leBPL_to_cpup(p++);
		result += BITS_PER_LONG;
		size -= BITS_PER_LONG;
	tmp = leBPL_to_cpup(p);
	tmp &= ~0UL >> (BITS_PER_LONG - size);
	return result + ffz(~tmp);
#define find_first_one_bit(addr, size) \
	find_next_one_bit((addr), (size), 0)
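/*
 * Worked example (commentary, not original): with BITS_PER_LONG == 64, a
 * first bitmap word of cpu_to_le64(0x100) and offset 3, the scan loads the
 * word, masks off bits 0-2 with (~0UL << 3), and returns ffz(~0x100) == 8:
 * the first set bit at or after the requested offset. UDF space bitmaps
 * use a set bit to mean "free", so a one-bit is exactly what the
 * allocation paths below are looking for.
 */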
static int read_block_bitmap(struct super_block *sb,
			     struct udf_bitmap *bitmap, unsigned int block,
			     unsigned long bitmap_nr)
	struct buffer_head *bh = NULL;
	struct kernel_lb_addr loc;

	loc.logicalBlockNum = bitmap->s_extPosition;
	loc.partitionReferenceNum = UDF_SB(sb)->s_partition;

	bh = udf_tread(sb, udf_get_lb_pblock(sb, &loc, block));
	bitmap->s_block_bitmap[bitmap_nr] = bh;

static int __load_block_bitmap(struct super_block *sb,
			       struct udf_bitmap *bitmap,
			       unsigned int block_group)
	int nr_groups = bitmap->s_nr_groups;

	if (block_group >= nr_groups) {
		udf_debug("block_group (%d) >= nr_groups (%d)\n", block_group,

	if (bitmap->s_block_bitmap[block_group]) {
		retval = read_block_bitmap(sb, bitmap, block_group,

static inline int load_block_bitmap(struct super_block *sb,
				    struct udf_bitmap *bitmap,
				    unsigned int block_group)
	slot = __load_block_bitmap(sb, bitmap, block_group);

	if (!bitmap->s_block_bitmap[slot])

static bool udf_add_free_space(struct udf_sb_info *sbi,
			       u16 partition, u32 cnt)
	struct logicalVolIntegrityDesc *lvid;

	if (sbi->s_lvid_bh == NULL)

	lvid = (struct logicalVolIntegrityDesc *)sbi->s_lvid_bh->b_data;
	le32_add_cpu(&lvid->freeSpaceTable[partition], cnt);
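/*
 * For reference (commentary, not original): le32_add_cpu() is the
 * endian-safe read-modify-write on the on-disk LVID table, equivalent to
 *
 *	lvid->freeSpaceTable[partition] =
 *		cpu_to_le32(le32_to_cpu(lvid->freeSpaceTable[partition]) + cnt);
 *
 * Callers below pass a negated count (e.g. -alloc_count) to charge blocks
 * against the table; unsigned wraparound makes that a modular subtract.
 */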

static void udf_bitmap_free_blocks(struct super_block *sb,
				   struct udf_bitmap *bitmap,
				   struct kernel_lb_addr *bloc,
	struct udf_sb_info *sbi = UDF_SB(sb);
	struct buffer_head *bh = NULL;
	struct udf_part_map *partmap;
	unsigned long block_group;
	unsigned long overflow;

	mutex_lock(&sbi->s_alloc_mutex);
	partmap = &sbi->s_partmaps[bloc->partitionReferenceNum];
	if (bloc->logicalBlockNum < 0 ||
	    (bloc->logicalBlockNum + count) >
	     partmap->s_partition_len) {
		udf_debug("%d < %d || %d + %d > %d\n",
			  bloc->logicalBlockNum, 0, bloc->logicalBlockNum,
			  count, partmap->s_partition_len);

	block = bloc->logicalBlockNum + offset +
		(sizeof(struct spaceBitmapDesc) << 3);
	block_group = block >> (sb->s_blocksize_bits + 3);
	bit = block % (sb->s_blocksize << 3);
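	/*
	 * Worked example (commentary, assuming a 2048-byte block size):
	 * each bitmap block covers 2048 << 3 == 16384 bits, so block_group
	 * is block >> 14 and bit is block % 16384; 'block' was biased above
	 * by the size of the spaceBitmapDesc header in bits.
	 */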
	/*
	 * Check to see if we are freeing blocks across a group boundary.
	 */
	if (bit + count > (sb->s_blocksize << 3)) {
		overflow = bit + count - (sb->s_blocksize << 3);
	bitmap_nr = load_block_bitmap(sb, bitmap, block_group);

	bh = bitmap->s_block_bitmap[bitmap_nr];
	for (i = 0; i < count; i++) {
		if (udf_set_bit(bit + i, bh->b_data)) {
			udf_debug("bit %ld already set\n", bit + i);
			udf_debug("byte=%2x\n",
				  ((char *)bh->b_data)[(bit + i) >> 3]);

			vfs_dq_free_block(inode, 1);
			udf_add_free_space(sbi, sbi->s_partition, 1);

	mark_buffer_dirty(bh);

	mark_buffer_dirty(sbi->s_lvid_bh);
	mutex_unlock(&sbi->s_alloc_mutex);

static int udf_bitmap_prealloc_blocks(struct super_block *sb,
				      struct udf_bitmap *bitmap,
				      uint16_t partition, uint32_t first_block,
				      uint32_t block_count)
	struct udf_sb_info *sbi = UDF_SB(sb);
	int bit, block, block_group, group_start;
	int nr_groups, bitmap_nr;
	struct buffer_head *bh;

	mutex_lock(&sbi->s_alloc_mutex);
	part_len = sbi->s_partmaps[partition].s_partition_len;
	if (first_block >= part_len)

	if (first_block + block_count > part_len)
		block_count = part_len - first_block;

		nr_groups = udf_compute_nr_groups(sb, partition);
		block = first_block + (sizeof(struct spaceBitmapDesc) << 3);
		block_group = block >> (sb->s_blocksize_bits + 3);
		group_start = block_group ? 0 : sizeof(struct spaceBitmapDesc);

		bitmap_nr = load_block_bitmap(sb, bitmap, block_group);
		bh = bitmap->s_block_bitmap[bitmap_nr];

		bit = block % (sb->s_blocksize << 3);
		while (bit < (sb->s_blocksize << 3) && block_count > 0) {
			if (!udf_test_bit(bit, bh->b_data))
			else if (vfs_dq_prealloc_block(inode, 1))
			else if (!udf_clear_bit(bit, bh->b_data)) {
				udf_debug("bit already cleared for block %d\n", bit);
				vfs_dq_free_block(inode, 1);

		mark_buffer_dirty(bh);
	} while (block_count > 0);

	if (udf_add_free_space(sbi, partition, -alloc_count))
		mark_buffer_dirty(sbi->s_lvid_bh);

	mutex_unlock(&sbi->s_alloc_mutex);
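/*
 * Usage note (commentary, not original): preallocation is best-effort.
 * The scan bails out at the first bit that is already in use, so the
 * reservation is a contiguous run starting at first_block, and the
 * function returns the number of blocks actually grabbed (alloc_count),
 * which may be less than the block_count that was asked for.
 */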

static int udf_bitmap_new_block(struct super_block *sb,
				struct udf_bitmap *bitmap, uint16_t partition,
				uint32_t goal, int *err)
	struct udf_sb_info *sbi = UDF_SB(sb);
	int newbit, bit = 0, block, block_group, group_start;
	int end_goal, nr_groups, bitmap_nr, i;
	struct buffer_head *bh = NULL;

	mutex_lock(&sbi->s_alloc_mutex);

	if (goal >= sbi->s_partmaps[partition].s_partition_len)

	nr_groups = bitmap->s_nr_groups;
	block = goal + (sizeof(struct spaceBitmapDesc) << 3);
	block_group = block >> (sb->s_blocksize_bits + 3);
	group_start = block_group ? 0 : sizeof(struct spaceBitmapDesc);

	bitmap_nr = load_block_bitmap(sb, bitmap, block_group);
	bh = bitmap->s_block_bitmap[bitmap_nr];
	ptr = memscan((char *)bh->b_data + group_start, 0xFF,
		      sb->s_blocksize - group_start);
	if ((ptr - ((char *)bh->b_data)) < sb->s_blocksize) {
		bit = block % (sb->s_blocksize << 3);
		if (udf_test_bit(bit, bh->b_data))

		end_goal = (bit + 63) & ~63;
		bit = udf_find_next_one_bit(bh->b_data, end_goal, bit);

		ptr = memscan((char *)bh->b_data + (bit >> 3), 0xFF,
			      sb->s_blocksize - ((bit + 7) >> 3));
		newbit = (ptr - ((char *)bh->b_data)) << 3;
		if (newbit < sb->s_blocksize << 3) {

		newbit = udf_find_next_one_bit(bh->b_data,
					       sb->s_blocksize << 3, bit);
		if (newbit < sb->s_blocksize << 3) {
	for (i = 0; i < (nr_groups * 2); i++) {
		if (block_group >= nr_groups)
		group_start = block_group ? 0 : sizeof(struct spaceBitmapDesc);

		bitmap_nr = load_block_bitmap(sb, bitmap, block_group);
		bh = bitmap->s_block_bitmap[bitmap_nr];

		ptr = memscan((char *)bh->b_data + group_start, 0xFF,
			      sb->s_blocksize - group_start);
		if ((ptr - ((char *)bh->b_data)) < sb->s_blocksize) {
			bit = (ptr - ((char *)bh->b_data)) << 3;

		bit = udf_find_next_one_bit((char *)bh->b_data,
					    sb->s_blocksize << 3,
		if (bit < sb->s_blocksize << 3)
	}

	if (i >= (nr_groups * 2)) {
		mutex_unlock(&sbi->s_alloc_mutex);

	if (bit < sb->s_blocksize << 3)

	bit = udf_find_next_one_bit(bh->b_data, sb->s_blocksize << 3,
	if (bit >= sb->s_blocksize << 3) {
		mutex_unlock(&sbi->s_alloc_mutex);
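	/*
	 * Walk back over up to 7 immediately preceding free bits so the
	 * allocation starts at (or near) the beginning of a free run
	 * rather than in the middle of one.
	 */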
	while (i < 7 && bit > (group_start << 3) &&
	       udf_test_bit(bit - 1, bh->b_data)) {

	/*
	 * Check quota for allocation of this block.
	 */
	if (inode && vfs_dq_alloc_block(inode, 1)) {
		mutex_unlock(&sbi->s_alloc_mutex);

	newblock = bit + (block_group << (sb->s_blocksize_bits + 3)) -
		   (sizeof(struct spaceBitmapDesc) << 3);

	if (!udf_clear_bit(bit, bh->b_data)) {
		udf_debug("bit already cleared for block %d\n", bit);

	mark_buffer_dirty(bh);

	if (udf_add_free_space(sbi, partition, -1))
		mark_buffer_dirty(sbi->s_lvid_bh);

	mutex_unlock(&sbi->s_alloc_mutex);

	mutex_unlock(&sbi->s_alloc_mutex);
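/*
 * Note on the scan strategy above (commentary, not original): memscan()
 * returns the address of the first byte equal to 0xFF (eight free blocks
 * in a row, since set bits mean free), or the end of the area if no such
 * byte exists. The goal logic therefore tries, in order: the goal bit
 * itself, any set bit between the goal and the next 64-bit boundary, a
 * fully free byte, and finally any set bit in the group, before moving on
 * to the remaining groups.
 */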

static void udf_table_free_blocks(struct super_block *sb,
				  struct kernel_lb_addr *bloc,
	struct udf_sb_info *sbi = UDF_SB(sb);
	struct udf_part_map *partmap;
	struct kernel_lb_addr eloc;
	struct extent_position oepos, epos;
	struct udf_inode_info *iinfo;

	mutex_lock(&sbi->s_alloc_mutex);
	partmap = &sbi->s_partmaps[bloc->partitionReferenceNum];
	if (bloc->logicalBlockNum < 0 ||
	    (bloc->logicalBlockNum + count) >
	     partmap->s_partition_len) {
		udf_debug("%d < %d || %d + %d > %d\n",
			  bloc->logicalBlockNum, 0, bloc->logicalBlockNum,
			  count, partmap->s_partition_len);
	iinfo = UDF_I(table);
	/* We do this up front - there are some error conditions that
	   could occur, but... oh well */
	vfs_dq_free_block(inode, count);
	if (udf_add_free_space(sbi, sbi->s_partition, count))
		mark_buffer_dirty(sbi->s_lvid_bh);

	start = bloc->logicalBlockNum + offset;
	end = bloc->logicalBlockNum + offset + count - 1;

	epos.offset = oepos.offset = sizeof(struct unallocSpaceEntry);
	epos.block = oepos.block = iinfo->i_location;
	epos.bh = oepos.bh = NULL;
	       (etype = udf_next_aext(table, &epos, &eloc, &elen, 1)) != -1) {
		if (((eloc.logicalBlockNum +
			(elen >> sb->s_blocksize_bits)) == start)) {
			if ((0x3FFFFFFF - elen) <
					(count << sb->s_blocksize_bits)) {
				uint32_t tmp = ((0x3FFFFFFF - elen) >>
						sb->s_blocksize_bits);
				elen = (etype << 30) |
					(0x40000000 - sb->s_blocksize);
				elen = (etype << 30) |
					(count << sb->s_blocksize_bits));

			udf_write_aext(table, &oepos, &eloc, elen, 1);
		} else if (eloc.logicalBlockNum == (end + 1)) {
			if ((0x3FFFFFFF - elen) <
					(count << sb->s_blocksize_bits)) {
				uint32_t tmp = ((0x3FFFFFFF - elen) >>
						sb->s_blocksize_bits);
				eloc.logicalBlockNum -= tmp;
				elen = (etype << 30) |
					(0x40000000 - sb->s_blocksize);
				eloc.logicalBlockNum = start;
				elen = (etype << 30) |
					(count << sb->s_blocksize_bits));

			udf_write_aext(table, &oepos, &eloc, elen, 1);

		if (epos.bh != oepos.bh) {
			oepos.block = epos.block;
			oepos.offset = epos.offset;
	/*
	 * NOTE: we CANNOT use udf_add_aext here, as it can try to
	 * allocate a new block, and since we hold the super block
	 * lock already very bad things would happen :)
	 *
	 * We copy the behavior of udf_add_aext, but instead of
	 * trying to allocate a new block close to the existing one,
	 * we just steal a block from the extent we are trying to add.
	 *
	 * It would be nice if the blocks were close together, but it
	 * doesn't really matter.
	 */
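	/*
	 * Worked example (commentary): freeing a 10-block extent when the
	 * current descriptor block is full. One block is taken off the
	 * front of the run to hold the new allocation extent descriptor,
	 * so the extent actually recorded in the table shrinks to 9 blocks
	 * starting one block later, and epos.block points at the stolen
	 * block.
	 */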
	struct short_ad *sad = NULL;
	struct long_ad *lad = NULL;
	struct allocExtDesc *aed;

	eloc.logicalBlockNum = start;
	elen = EXT_RECORDED_ALLOCATED |
		(count << sb->s_blocksize_bits);

	if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
		adsize = sizeof(struct short_ad);
	else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
		adsize = sizeof(struct long_ad);
	if (epos.offset + (2 * adsize) > sb->s_blocksize) {
		/* Steal a block from the extent being freed */
		epos.block.logicalBlockNum = eloc.logicalBlockNum;
		eloc.logicalBlockNum++;
		elen -= sb->s_blocksize;

		epos.bh = udf_tread(sb,
				udf_get_lb_pblock(sb, &epos.block, 0));

		aed = (struct allocExtDesc *)(epos.bh->b_data);
		aed->previousAllocExtLocation =
				cpu_to_le32(oepos.block.logicalBlockNum);
		if (epos.offset + adsize > sb->s_blocksize) {
			loffset = epos.offset;
			aed->lengthAllocDescs = cpu_to_le32(adsize);
			sptr = iinfo->i_ext.i_data + epos.offset
			dptr = epos.bh->b_data +
				sizeof(struct allocExtDesc);
			memcpy(dptr, sptr, adsize);
			epos.offset = sizeof(struct allocExtDesc) +

			loffset = epos.offset + adsize;
			aed->lengthAllocDescs = cpu_to_le32(0);

			sptr = oepos.bh->b_data + epos.offset;
			aed = (struct allocExtDesc *)
			le32_add_cpu(&aed->lengthAllocDescs,

			sptr = iinfo->i_ext.i_data +
			iinfo->i_lenAlloc += adsize;
			mark_inode_dirty(table);

			epos.offset = sizeof(struct allocExtDesc);

		if (sbi->s_udfrev >= 0x0200)
			udf_new_tag(epos.bh->b_data, TAG_IDENT_AED,
				    3, 1, epos.block.logicalBlockNum,
		else
			udf_new_tag(epos.bh->b_data, TAG_IDENT_AED,
				    2, 1, epos.block.logicalBlockNum,
		switch (iinfo->i_alloc_type) {
		case ICBTAG_FLAG_AD_SHORT:
			sad = (struct short_ad *)sptr;
			sad->extLength = cpu_to_le32(
				EXT_NEXT_EXTENT_ALLOCDECS |
			sad->extPosition =
				cpu_to_le32(epos.block.logicalBlockNum);
			break;
		case ICBTAG_FLAG_AD_LONG:
			lad = (struct long_ad *)sptr;
			lad->extLength = cpu_to_le32(
				EXT_NEXT_EXTENT_ALLOCDECS |
			lad->extLocation =
				cpu_to_lelb(epos.block);
			break;
		}

			udf_update_tag(oepos.bh->b_data, loffset);
			mark_buffer_dirty(oepos.bh);

			mark_inode_dirty(table);

	/* It's possible that stealing the block emptied the extent */
		udf_write_aext(table, &epos, &eloc, elen, 1);

			iinfo->i_lenAlloc += adsize;
			mark_inode_dirty(table);

			aed = (struct allocExtDesc *)epos.bh->b_data;
			le32_add_cpu(&aed->lengthAllocDescs, adsize);
			udf_update_tag(epos.bh->b_data, epos.offset);
			mark_buffer_dirty(epos.bh);

	mutex_unlock(&sbi->s_alloc_mutex);
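/*
 * Commentary on the merge logic above (not original): an extent length
 * field packs the 2-bit extent type into bits 30-31 and a 30-bit byte
 * length below it, hence the 0x3FFFFFFF ceiling tested before merging.
 * When a merge would overflow, the existing extent is topped up to the
 * largest block-aligned length that still fits (0x40000000 - blocksize)
 * and only the remainder is recorded separately.
 */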

static int udf_table_prealloc_blocks(struct super_block *sb,
				     struct inode *table, uint16_t partition,
				     uint32_t first_block, uint32_t block_count)
	struct udf_sb_info *sbi = UDF_SB(sb);
	uint32_t elen, adsize;
	struct kernel_lb_addr eloc;
	struct extent_position epos;
	struct udf_inode_info *iinfo;

	if (first_block >= sbi->s_partmaps[partition].s_partition_len)

	iinfo = UDF_I(table);
	if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
		adsize = sizeof(struct short_ad);
	else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
		adsize = sizeof(struct long_ad);
	mutex_lock(&sbi->s_alloc_mutex);
	epos.offset = sizeof(struct unallocSpaceEntry);
	epos.block = iinfo->i_location;
	eloc.logicalBlockNum = 0xFFFFFFFF;

	while (first_block != eloc.logicalBlockNum &&
	       (etype = udf_next_aext(table, &epos, &eloc, &elen, 1)) != -1) {
		udf_debug("eloc=%d, elen=%d, first_block=%d\n",
			  eloc.logicalBlockNum, elen, first_block);
		; /* empty loop body */
	}

	if (first_block == eloc.logicalBlockNum) {
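		/*
		 * udf_next_aext() has advanced epos past the matching
		 * descriptor; step back one descriptor so the rewrite or
		 * delete below operates on the extent just found.
		 */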
		epos.offset -= adsize;

		alloc_count = (elen >> sb->s_blocksize_bits);
		if (inode && vfs_dq_prealloc_block(inode,
			alloc_count > block_count ? block_count : alloc_count))
		else if (alloc_count > block_count) {
			alloc_count = block_count;
			eloc.logicalBlockNum += alloc_count;
			elen -= (alloc_count << sb->s_blocksize_bits);
			udf_write_aext(table, &epos, &eloc,
				       (etype << 30) | elen, 1);
		} else
			udf_delete_aext(table, epos, eloc,
					(etype << 30) | elen);

	if (alloc_count && udf_add_free_space(sbi, partition, -alloc_count)) {
		mark_buffer_dirty(sbi->s_lvid_bh);

	mutex_unlock(&sbi->s_alloc_mutex);

static int udf_table_new_block(struct super_block *sb,
			       struct inode *table, uint16_t partition,
			       uint32_t goal, int *err)
	struct udf_sb_info *sbi = UDF_SB(sb);
	uint32_t spread = 0xFFFFFFFF, nspread = 0xFFFFFFFF;
	uint32_t newblock = 0, adsize;
	uint32_t elen, goal_elen = 0;
	struct kernel_lb_addr eloc, uninitialized_var(goal_eloc);
	struct extent_position epos, goal_epos;
	struct udf_inode_info *iinfo = UDF_I(table);

	if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
		adsize = sizeof(struct short_ad);
	else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
		adsize = sizeof(struct long_ad);

	mutex_lock(&sbi->s_alloc_mutex);
	if (goal >= sbi->s_partmaps[partition].s_partition_len)
	/* We search for the closest matching block to goal. If we find
	   an exact hit, we stop. Otherwise we keep going till we run out
	   of extents. We store the buffer_head, bloc, and extoffset
	   of the current closest match and use that when we are done.
	 */
	epos.offset = sizeof(struct unallocSpaceEntry);
	epos.block = iinfo->i_location;
	epos.bh = goal_epos.bh = NULL;

	       (etype = udf_next_aext(table, &epos, &eloc, &elen, 1)) != -1) {
		if (goal >= eloc.logicalBlockNum) {
			if (goal < eloc.logicalBlockNum +
				   (elen >> sb->s_blocksize_bits))

			nspread = goal - eloc.logicalBlockNum -
				  (elen >> sb->s_blocksize_bits);

			nspread = eloc.logicalBlockNum - goal;

		if (nspread < spread) {
			if (goal_epos.bh != epos.bh) {
				brelse(goal_epos.bh);
				goal_epos.bh = epos.bh;
				get_bh(goal_epos.bh);

			goal_epos.block = epos.block;
			goal_epos.offset = epos.offset - adsize;

			goal_elen = (etype << 30) | elen;
	if (spread == 0xFFFFFFFF) {
		brelse(goal_epos.bh);
		mutex_unlock(&sbi->s_alloc_mutex);

	/* Only allocate blocks from the beginning of the extent.
	   That way, we only delete (empty) extents, never have to insert an
	   extent because of splitting */
	/* This works, but very poorly.... */

	newblock = goal_eloc.logicalBlockNum;
	goal_eloc.logicalBlockNum++;
	goal_elen -= sb->s_blocksize;

	if (inode && vfs_dq_alloc_block(inode, 1)) {
		brelse(goal_epos.bh);
		mutex_unlock(&sbi->s_alloc_mutex);

	if (goal_elen)
		udf_write_aext(table, &goal_epos, &goal_eloc, goal_elen, 1);
	else
		udf_delete_aext(table, goal_epos, goal_eloc, goal_elen);
	brelse(goal_epos.bh);

	if (udf_add_free_space(sbi, partition, -1))
		mark_buffer_dirty(sbi->s_lvid_bh);

	mutex_unlock(&sbi->s_alloc_mutex);
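/*
 * Worked example of the spread metric (commentary): for a free extent
 * covering blocks 100-199, a goal of 150 falls inside the extent and the
 * search stops at once; a goal of 250 scores 250 - 100 - 100 == 50; a
 * goal of 80 scores 100 - 80 == 20. The extent with the smallest score
 * wins, and the new block is then taken from the front of that extent.
 */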

void udf_free_blocks(struct super_block *sb, struct inode *inode,
		     struct kernel_lb_addr *bloc, uint32_t offset,
	uint16_t partition = bloc->partitionReferenceNum;
	struct udf_part_map *map = &UDF_SB(sb)->s_partmaps[partition];

	if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP) {
		udf_bitmap_free_blocks(sb, inode, map->s_uspace.s_bitmap,
				       bloc, offset, count);
	} else if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_TABLE) {
		udf_table_free_blocks(sb, inode, map->s_uspace.s_table,
				      bloc, offset, count);
	} else if (map->s_partition_flags & UDF_PART_FLAG_FREED_BITMAP) {
		udf_bitmap_free_blocks(sb, inode, map->s_fspace.s_bitmap,
				       bloc, offset, count);
	} else if (map->s_partition_flags & UDF_PART_FLAG_FREED_TABLE) {
		udf_table_free_blocks(sb, inode, map->s_fspace.s_table,
				      bloc, offset, count);

inline int udf_prealloc_blocks(struct super_block *sb,
			       uint16_t partition, uint32_t first_block,
			       uint32_t block_count)
	struct udf_part_map *map = &UDF_SB(sb)->s_partmaps[partition];

	if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP)
		return udf_bitmap_prealloc_blocks(sb, inode,
						  map->s_uspace.s_bitmap,
						  partition, first_block,
	else if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_TABLE)
		return udf_table_prealloc_blocks(sb, inode,
						 map->s_uspace.s_table,
						 partition, first_block,
	else if (map->s_partition_flags & UDF_PART_FLAG_FREED_BITMAP)
		return udf_bitmap_prealloc_blocks(sb, inode,
						  map->s_fspace.s_bitmap,
						  partition, first_block,
	else if (map->s_partition_flags & UDF_PART_FLAG_FREED_TABLE)
		return udf_table_prealloc_blocks(sb, inode,
						 map->s_fspace.s_table,
						 partition, first_block,

inline int udf_new_block(struct super_block *sb,
			 uint16_t partition, uint32_t goal, int *err)
	struct udf_part_map *map = &UDF_SB(sb)->s_partmaps[partition];

	if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP)
		return udf_bitmap_new_block(sb, inode,
					    map->s_uspace.s_bitmap,
					    partition, goal, err);
	else if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_TABLE)
		return udf_table_new_block(sb, inode,
					   map->s_uspace.s_table,
					   partition, goal, err);
	else if (map->s_partition_flags & UDF_PART_FLAG_FREED_BITMAP)
		return udf_bitmap_new_block(sb, inode,
					    map->s_fspace.s_bitmap,
					    partition, goal, err);
	else if (map->s_partition_flags & UDF_PART_FLAG_FREED_TABLE)
		return udf_table_new_block(sb, inode,
					   map->s_fspace.s_table,
					   partition, goal, err);