2 * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License as
7 * published by the Free Software Foundation.
9 * This program is distributed in the hope that it would be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write the Free Software Foundation,
16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
20 #include "xfs_types.h"
24 #include "xfs_trans.h"
28 #include "xfs_dmapi.h"
29 #include "xfs_mount.h"
30 #include "xfs_bmap_btree.h"
31 #include "xfs_alloc_btree.h"
32 #include "xfs_ialloc_btree.h"
33 #include "xfs_dir2_sf.h"
34 #include "xfs_attr_sf.h"
35 #include "xfs_dinode.h"
36 #include "xfs_inode.h"
37 #include "xfs_inode_item.h"
38 #include "xfs_alloc.h"
39 #include "xfs_btree.h"
40 #include "xfs_btree_trace.h"
41 #include "xfs_ialloc.h"
42 #include "xfs_itable.h"
44 #include "xfs_error.h"
45 #include "xfs_quota.h"
/*
 * xfs_extent_state() fragment: classify an extent's state.
 * NOTE(review): the function signature and the XFS_EXT_NORM branch are
 * missing from this excerpt (original lines 49-56 absent) — only the
 * unwritten path survives here; confirm against the full source.
 */
48 * Determine the extent state.
57 ASSERT(blks != 0); /* saved for DMIG */
58 return XFS_EXT_UNWRITTEN;
/*
 * xfs_bmdr_to_bmbt() fragment: expand an on-disk (dinode-fork) bmap btree
 * root block into the larger in-memory root format.  The root has no
 * siblings, so both sibling pointers are set to NULLDFSBNO.
 * NOTE(review): return type, name line, several parameters, and local
 * declarations are missing from this excerpt — verify signature elsewhere.
 */
64 * Convert on-disk form of btree root to in-memory form.
69 xfs_bmdr_block_t *dblock,
71 struct xfs_btree_block *rblock,
/* Copy header fields, then stamp the in-memory magic number. */
80 rblock->bb_magic = cpu_to_be32(XFS_BMAP_MAGIC);
81 rblock->bb_level = dblock->bb_level;
82 ASSERT(be16_to_cpu(rblock->bb_level) > 0);
83 rblock->bb_numrecs = dblock->bb_numrecs;
84 rblock->bb_u.l.bb_leftsib = cpu_to_be64(NULLDFSBNO);
85 rblock->bb_u.l.bb_rightsib = cpu_to_be64(NULLDFSBNO);
/*
 * dmxr is first the on-disk max record count (used to locate the pointer
 * array inside dblock), then reused as the actual record count for the
 * key/pointer copies below.
 */
86 dmxr = xfs_bmdr_maxrecs(mp, dblocklen, 0);
87 fkp = XFS_BMDR_KEY_ADDR(dblock, 1);
88 tkp = XFS_BMBT_KEY_ADDR(mp, rblock, 1);
89 fpp = XFS_BMDR_PTR_ADDR(dblock, 1, dmxr);
90 tpp = XFS_BMAP_BROOT_PTR_ADDR(mp, rblock, 1, rblocklen);
91 dmxr = be16_to_cpu(dblock->bb_numrecs);
92 memcpy(tkp, fkp, sizeof(*fkp) * dmxr);
93 memcpy(tpp, fpp, sizeof(*fpp) * dmxr);
/*
 * __xfs_bmbt_get_all() fragment: unpack the two 64-bit packed words
 * (l0/l1) of a bmap extent record into an xfs_bmbt_irec.
 * Bit layout implied by the shifts/masks below: l0 = [1-bit extent
 * flag | 54-bit startoff | top 9 bits of startblock], l1 = [low 43 bits
 * of startblock | 21-bit blockcount].
 * NOTE(review): the signature and the #if XFS_BIG_BLKNOS / #else
 * preprocessor lines are missing from this excerpt — only an #endif at
 * original line 130 remains; confirm which branches are which.
 */
97 * Convert a compressed bmap extent record to an uncompressed form.
98 * This code must be in sync with the routines xfs_bmbt_get_startoff,
99 * xfs_bmbt_get_startblock, xfs_bmbt_get_blockcount and xfs_bmbt_get_state.
111 ext_flag = (int)(l0 >> (64 - BMBT_EXNTFLAG_BITLEN));
112 s->br_startoff = ((xfs_fileoff_t)l0 &
113 xfs_mask64lo(64 - BMBT_EXNTFLAG_BITLEN)) >> 9;
/* Wide-block-number variant: startblock spans both words. */
115 s->br_startblock = (((xfs_fsblock_t)l0 & xfs_mask64lo(9)) << 43) |
116 (((xfs_fsblock_t)l1) >> 21);
/*
 * Narrow variant: widen to xfs_dfsbno_t first and assert the value fits
 * in 32 bits unless it is a null (delayed-alloc) startblock.
 */
122 b = (((xfs_dfsbno_t)l0 & xfs_mask64lo(9)) << 43) |
123 (((xfs_dfsbno_t)l1) >> 21);
124 ASSERT((b >> 32) == 0 || isnulldstartblock(b));
125 s->br_startblock = (xfs_fsblock_t)b;
128 s->br_startblock = (xfs_fsblock_t)(((xfs_dfsbno_t)l1) >> 21);
130 #endif /* XFS_BIG_BLKNOS */
131 s->br_blockcount = (xfs_filblks_t)(l1 & xfs_mask64lo(21));
132 /* This is xfs_extent_state() in-line */
134 ASSERT(s->br_blockcount != 0); /* saved for DMIG */
135 st = XFS_EXT_UNWRITTEN;
/*
 * xfs_bmbt_get_all() fragment: unpack an in-memory (host-endian) extent
 * record by delegating to __xfs_bmbt_get_all with its two raw words.
 * NOTE(review): return type and name line are missing from this excerpt.
 */
143 xfs_bmbt_rec_host_t *r,
146 __xfs_bmbt_get_all(r->l0, r->l1, s);
/*
 * Return the 21-bit block count from the low bits of word l1 of an
 * in-memory extent record.
 */
150 * Extract the blockcount field from an in memory bmap extent record.
153 xfs_bmbt_get_blockcount(
154 xfs_bmbt_rec_host_t *r)
156 return (xfs_filblks_t)(r->l1 & xfs_mask64lo(21));
/*
 * Extract the startblock split across l0 (high 9 bits) and l1 (low 43
 * bits) of an in-memory extent record.
 * NOTE(review): the #if XFS_BIG_BLKNOS / #else lines are missing from
 * this excerpt; lines 167-168 are the wide-block branch, 173-178 the
 * narrow one — confirm against the full source.
 */
160 * Extract the startblock field from an in memory bmap extent record.
163 xfs_bmbt_get_startblock(
164 xfs_bmbt_rec_host_t *r)
167 return (((xfs_fsblock_t)r->l0 & xfs_mask64lo(9)) << 43) |
168 (((xfs_fsblock_t)r->l1) >> 21);
/* Narrow variant: assert the value fits 32 bits unless null. */
173 b = (((xfs_dfsbno_t)r->l0 & xfs_mask64lo(9)) << 43) |
174 (((xfs_dfsbno_t)r->l1) >> 21);
175 ASSERT((b >> 32) == 0 || isnulldstartblock(b));
176 return (xfs_fsblock_t)b;
178 return (xfs_fsblock_t)(((xfs_dfsbno_t)r->l1) >> 21);
180 #endif /* XFS_BIG_BLKNOS */
/*
 * Return the file offset: mask off the top extent-flag bit of l0 and
 * shift out the 9 startblock bits below the offset field.
 */
184 * Extract the startoff field from an in memory bmap extent record.
187 xfs_bmbt_get_startoff(
188 xfs_bmbt_rec_host_t *r)
190 return ((xfs_fileoff_t)r->l0 &
191 xfs_mask64lo(64 - BMBT_EXNTFLAG_BITLEN)) >> 9;
/*
 * xfs_bmbt_get_state() fragment: read the top extent-flag bit of l0 and
 * combine it with the block count via xfs_extent_state().
 * NOTE(review): return type, name line, and the closing argument of the
 * xfs_extent_state() call are missing from this excerpt.
 */
196 xfs_bmbt_rec_host_t *r)
200 ext_flag = (int)((r->l0) >> (64 - BMBT_EXNTFLAG_BITLEN));
201 return xfs_extent_state(xfs_bmbt_get_blockcount(r),
/*
 * On-disk (big-endian) counterpart of xfs_bmbt_get_blockcount():
 * byte-swap l1 before masking out the 21-bit count.
 */
206 * Extract the blockcount field from an on disk bmap extent record.
209 xfs_bmbt_disk_get_blockcount(
212 return (xfs_filblks_t)(be64_to_cpu(r->l1) & xfs_mask64lo(21));
/*
 * On-disk (big-endian) counterpart of xfs_bmbt_get_startoff().
 */
216 * Extract the startoff field from a disk format bmap extent record.
219 xfs_bmbt_disk_get_startoff(
222 return ((xfs_fileoff_t)be64_to_cpu(r->l0) &
223 xfs_mask64lo(64 - BMBT_EXNTFLAG_BITLEN)) >> 9;
/*
 * xfs_bmbt_set_allf(): pack startoff/startblock/blockcount/state into
 * the two 64-bit words of an in-memory extent record — the inverse of
 * __xfs_bmbt_get_all().
 * NOTE(review): the return-type/name lines, the state parameter line,
 * the #if XFS_BIG_BLKNOS line, and the else of the isnullstartblock()
 * test are missing from this excerpt.
 */
228 * Set all the fields in a bmap extent record from the arguments.
232 xfs_bmbt_rec_host_t *r,
233 xfs_fileoff_t startoff,
234 xfs_fsblock_t startblock,
235 xfs_filblks_t blockcount,
238 int extent_flag = (state == XFS_EXT_NORM) ? 0 : 1;
/* Each field must fit its bit-field; caller guarantees this. */
240 ASSERT(state == XFS_EXT_NORM || state == XFS_EXT_UNWRITTEN);
241 ASSERT((startoff & xfs_mask64hi(64-BMBT_STARTOFF_BITLEN)) == 0);
242 ASSERT((blockcount & xfs_mask64hi(64-BMBT_BLOCKCOUNT_BITLEN)) == 0);
245 ASSERT((startblock & xfs_mask64hi(64-BMBT_STARTBLOCK_BITLEN)) == 0);
/* Wide-block-number layout: startblock straddles l0 and l1. */
247 r->l0 = ((xfs_bmbt_rec_base_t)extent_flag << 63) |
248 ((xfs_bmbt_rec_base_t)startoff << 9) |
249 ((xfs_bmbt_rec_base_t)startblock >> 43);
250 r->l1 = ((xfs_bmbt_rec_base_t)startblock << 21) |
251 ((xfs_bmbt_rec_base_t)blockcount &
252 (xfs_bmbt_rec_base_t)xfs_mask64lo(21));
253 #else /* !XFS_BIG_BLKNOS */
/*
 * Narrow layout: a null (delayed-alloc) startblock is encoded by
 * saturating the 9 high startblock bits in l0 and the 11 top bits of l1.
 */
254 if (isnullstartblock(startblock)) {
255 r->l0 = ((xfs_bmbt_rec_base_t)extent_flag << 63) |
256 ((xfs_bmbt_rec_base_t)startoff << 9) |
257 (xfs_bmbt_rec_base_t)xfs_mask64lo(9);
258 r->l1 = xfs_mask64hi(11) |
259 ((xfs_bmbt_rec_base_t)startblock << 21) |
260 ((xfs_bmbt_rec_base_t)blockcount &
261 (xfs_bmbt_rec_base_t)xfs_mask64lo(21));
/* Real startblock: high 9 bits are zero, so l0 holds flag+offset only. */
263 r->l0 = ((xfs_bmbt_rec_base_t)extent_flag << 63) |
264 ((xfs_bmbt_rec_base_t)startoff << 9);
265 r->l1 = ((xfs_bmbt_rec_base_t)startblock << 21) |
266 ((xfs_bmbt_rec_base_t)blockcount &
267 (xfs_bmbt_rec_base_t)xfs_mask64lo(21));
269 #endif /* XFS_BIG_BLKNOS */
/*
 * Convenience wrapper: fill an in-memory extent record from an
 * uncompressed xfs_bmbt_irec via xfs_bmbt_set_allf().
 */
273 * Set all the fields in a bmap extent record from the uncompressed form.
277 xfs_bmbt_rec_host_t *r,
280 xfs_bmbt_set_allf(r, s->br_startoff, s->br_startblock,
281 s->br_blockcount, s->br_state);
/*
 * xfs_bmbt_disk_set_allf(): same packing as xfs_bmbt_set_allf() but the
 * words are stored big-endian via cpu_to_be64 for the on-disk record.
 * NOTE(review): several lines (return type, record parameter, state
 * parameter, the r->l0/r->l1 assignment heads around original lines
 * 305/309/315/324/327, the #if XFS_BIG_BLKNOS line, and the else) are
 * missing from this excerpt — the surviving continuation lines below
 * mirror xfs_bmbt_set_allf() exactly.
 */
286 * Set all the fields in a disk format bmap extent record from the arguments.
289 xfs_bmbt_disk_set_allf(
291 xfs_fileoff_t startoff,
292 xfs_fsblock_t startblock,
293 xfs_filblks_t blockcount,
296 int extent_flag = (state == XFS_EXT_NORM) ? 0 : 1;
298 ASSERT(state == XFS_EXT_NORM || state == XFS_EXT_UNWRITTEN);
299 ASSERT((startoff & xfs_mask64hi(64-BMBT_STARTOFF_BITLEN)) == 0);
300 ASSERT((blockcount & xfs_mask64hi(64-BMBT_BLOCKCOUNT_BITLEN)) == 0);
303 ASSERT((startblock & xfs_mask64hi(64-BMBT_STARTBLOCK_BITLEN)) == 0);
306 ((xfs_bmbt_rec_base_t)extent_flag << 63) |
307 ((xfs_bmbt_rec_base_t)startoff << 9) |
308 ((xfs_bmbt_rec_base_t)startblock >> 43));
310 ((xfs_bmbt_rec_base_t)startblock << 21) |
311 ((xfs_bmbt_rec_base_t)blockcount &
312 (xfs_bmbt_rec_base_t)xfs_mask64lo(21)));
313 #else /* !XFS_BIG_BLKNOS */
314 if (isnullstartblock(startblock)) {
316 ((xfs_bmbt_rec_base_t)extent_flag << 63) |
317 ((xfs_bmbt_rec_base_t)startoff << 9) |
318 (xfs_bmbt_rec_base_t)xfs_mask64lo(9));
319 r->l1 = cpu_to_be64(xfs_mask64hi(11) |
320 ((xfs_bmbt_rec_base_t)startblock << 21) |
321 ((xfs_bmbt_rec_base_t)blockcount &
322 (xfs_bmbt_rec_base_t)xfs_mask64lo(21)));
325 ((xfs_bmbt_rec_base_t)extent_flag << 63) |
326 ((xfs_bmbt_rec_base_t)startoff << 9));
328 ((xfs_bmbt_rec_base_t)startblock << 21) |
329 ((xfs_bmbt_rec_base_t)blockcount &
330 (xfs_bmbt_rec_base_t)xfs_mask64lo(21)));
332 #endif /* XFS_BIG_BLKNOS */
/*
 * Convenience wrapper: fill an on-disk extent record from an
 * uncompressed xfs_bmbt_irec via xfs_bmbt_disk_set_allf().
 */
336 * Set all the fields in a bmap extent record from the uncompressed form.
339 xfs_bmbt_disk_set_all(
343 xfs_bmbt_disk_set_allf(r, s->br_startoff, s->br_startblock,
344 s->br_blockcount, s->br_state);
/*
 * Replace only the low 21 bits (blockcount) of l1, preserving the
 * startblock bits above them.
 */
348 * Set the blockcount field in a bmap extent record.
351 xfs_bmbt_set_blockcount(
352 xfs_bmbt_rec_host_t *r,
/* Caller must pass a count that fits in 21 bits. */
355 ASSERT((v & xfs_mask64hi(43)) == 0);
356 r->l1 = (r->l1 & (xfs_bmbt_rec_base_t)xfs_mask64hi(43)) |
357 (xfs_bmbt_rec_base_t)(v & xfs_mask64lo(21));
/*
 * Replace the startblock field (high 9 bits of l0 plus bits 63..21 of
 * l1), preserving flag+offset in l0 and blockcount in l1.
 * NOTE(review): the value-parameter line, the #if XFS_BIG_BLKNOS line,
 * and the else of the isnullstartblock() test are missing from this
 * excerpt.
 */
361 * Set the startblock field in a bmap extent record.
364 xfs_bmbt_set_startblock(
365 xfs_bmbt_rec_host_t *r,
369 ASSERT((v & xfs_mask64hi(12)) == 0);
370 r->l0 = (r->l0 & (xfs_bmbt_rec_base_t)xfs_mask64hi(55)) |
371 (xfs_bmbt_rec_base_t)(v >> 43);
372 r->l1 = (r->l1 & (xfs_bmbt_rec_base_t)xfs_mask64lo(21)) |
373 (xfs_bmbt_rec_base_t)(v << 21);
374 #else /* !XFS_BIG_BLKNOS */
/* Narrow layout: encode a null startblock by saturating the high bits. */
375 if (isnullstartblock(v)) {
376 r->l0 |= (xfs_bmbt_rec_base_t)xfs_mask64lo(9);
377 r->l1 = (xfs_bmbt_rec_base_t)xfs_mask64hi(11) |
378 ((xfs_bmbt_rec_base_t)v << 21) |
379 (r->l1 & (xfs_bmbt_rec_base_t)xfs_mask64lo(21));
381 r->l0 &= ~(xfs_bmbt_rec_base_t)xfs_mask64lo(9);
382 r->l1 = ((xfs_bmbt_rec_base_t)v << 21) |
383 (r->l1 & (xfs_bmbt_rec_base_t)xfs_mask64lo(21));
385 #endif /* XFS_BIG_BLKNOS */
/*
 * Replace the 54-bit startoff field in l0, preserving the top extent
 * flag bit and the bottom 9 startblock bits.
 */
389 * Set the startoff field in a bmap extent record.
392 xfs_bmbt_set_startoff(
393 xfs_bmbt_rec_host_t *r,
396 ASSERT((v & xfs_mask64hi(9)) == 0);
397 r->l0 = (r->l0 & (xfs_bmbt_rec_base_t) xfs_mask64hi(1)) |
398 ((xfs_bmbt_rec_base_t)v << 9) |
399 (r->l0 & (xfs_bmbt_rec_base_t)xfs_mask64lo(9));
/*
 * Set or clear the single extent-flag bit at the top of l0:
 * cleared for XFS_EXT_NORM, set for XFS_EXT_UNWRITTEN.
 */
403 * Set the extent state field in a bmap extent record.
407 xfs_bmbt_rec_host_t *r,
410 ASSERT(v == XFS_EXT_NORM || v == XFS_EXT_UNWRITTEN);
411 if (v == XFS_EXT_NORM)
412 r->l0 &= xfs_mask64lo(64 - BMBT_EXNTFLAG_BITLEN);
414 r->l0 |= xfs_mask64hi(BMBT_EXNTFLAG_BITLEN);
/*
 * xfs_bmbt_to_bmdr(): inverse of xfs_bmdr_to_bmbt() — shrink the
 * in-memory root block back to the compact on-disk (dinode-fork) form.
 * Sanity-asserts the in-memory root (magic, null siblings, level > 0)
 * before copying header, keys, and pointers.
 * NOTE(review): remaining parameters and local declarations are missing
 * from this excerpt.
 */
418 * Convert in-memory form of btree root to on-disk form.
422 struct xfs_mount *mp,
423 struct xfs_btree_block *rblock,
425 xfs_bmdr_block_t *dblock,
434 ASSERT(be32_to_cpu(rblock->bb_magic) == XFS_BMAP_MAGIC);
435 ASSERT(be64_to_cpu(rblock->bb_u.l.bb_leftsib) == NULLDFSBNO);
436 ASSERT(be64_to_cpu(rblock->bb_u.l.bb_rightsib) == NULLDFSBNO);
437 ASSERT(be16_to_cpu(rblock->bb_level) > 0);
438 dblock->bb_level = rblock->bb_level;
439 dblock->bb_numrecs = rblock->bb_numrecs;
/* dmxr: first on-disk max records (to locate arrays), then real count. */
440 dmxr = xfs_bmdr_maxrecs(mp, dblocklen, 0);
441 fkp = XFS_BMBT_KEY_ADDR(mp, rblock, 1);
442 tkp = XFS_BMDR_KEY_ADDR(dblock, 1);
443 fpp = XFS_BMAP_BROOT_PTR_ADDR(mp, rblock, 1, rblocklen);
444 tpp = XFS_BMDR_PTR_ADDR(dblock, 1, dmxr);
445 dmxr = be16_to_cpu(dblock->bb_numrecs);
446 memcpy(tkp, fkp, sizeof(*fkp) * dmxr);
447 memcpy(tpp, fpp, sizeof(*fpp) * dmxr);
/*
 * Walk `num` extent records starting at `idx` and reject any record
 * whose extent-flag bit is set.
 * NOTE(review): the parameter lines, the shifted-expression head of the
 * flag test (original line 466), and the error-return body are missing
 * from this excerpt.
 */
451 * Check extent records, which have just been read, for
452 * any bit in the extent flag field. ASSERT on debug
453 * kernels, as this condition should not occur.
454 * Return an error condition (1) if any flags found,
455 * otherwise return 0.
464 for (; num > 0; num--, idx++) {
465 xfs_bmbt_rec_host_t *ep = xfs_iext_get_ext(ifp, idx);
467 (64 - BMBT_EXNTFLAG_BITLEN)) != 0) {
/*
 * Duplicate a bmap btree cursor: allocate a fresh cursor for the same
 * inode/fork, then copy over the private state that
 * xfs_bmbt_init_cursor() does not initialize.
 * NOTE(review): the name line and the return statement are missing from
 * this excerpt.
 */
476 STATIC struct xfs_btree_cur *
478 struct xfs_btree_cur *cur)
480 struct xfs_btree_cur *new;
482 new = xfs_bmbt_init_cursor(cur->bc_mp, cur->bc_tp,
483 cur->bc_private.b.ip, cur->bc_private.b.whichfork);
486 * Copy the firstblock, flist, and flags values,
487 * since init cursor doesn't get them.
489 new->bc_private.b.firstblock = cur->bc_private.b.firstblock;
490 new->bc_private.b.flist = cur->bc_private.b.flist;
491 new->bc_private.b.flags = cur->bc_private.b.flags;
/*
 * Fold the allocation bookkeeping of a scratch cursor (src) back into
 * the primary cursor (dst): accumulate blocks allocated, propagate the
 * first allocated block, and zero src's count so the blocks are not
 * accounted twice.  Asserts both cursors share the same free list and
 * that dst has a firstblock (or the inode is realtime).
 */
497 xfs_bmbt_update_cursor(
498 struct xfs_btree_cur *src,
499 struct xfs_btree_cur *dst)
501 ASSERT((dst->bc_private.b.firstblock != NULLFSBLOCK) ||
502 (dst->bc_private.b.ip->i_d.di_flags & XFS_DIFLAG_REALTIME));
503 ASSERT(dst->bc_private.b.flist == src->bc_private.b.flist);
505 dst->bc_private.b.allocated += src->bc_private.b.allocated;
506 dst->bc_private.b.firstblock = src->bc_private.b.firstblock;
508 src->bc_private.b.allocated = 0;
/*
 * Allocate one filesystem block for a new bmap btree node.
 * Strategy: near `start` when a firstblock hint exists; start-bno when
 * no hint or when the low-space algorithm is active; on failure with
 * minleft set, retry in any AG and switch the free list to low-space
 * mode.  On success, updates cursor accounting, inode nblocks, and
 * quota, and returns the new block number via `new`.
 * NOTE(review): the `stat` out-parameter, several error-goto lines, the
 * retry reset of args.fsbno, and the *stat assignments are missing from
 * this excerpt.
 */
512 xfs_bmbt_alloc_block(
513 struct xfs_btree_cur *cur,
514 union xfs_btree_ptr *start,
515 union xfs_btree_ptr *new,
519 xfs_alloc_arg_t args; /* block allocation args */
520 int error; /* error return value */
522 memset(&args, 0, sizeof(args));
523 args.tp = cur->bc_tp;
524 args.mp = cur->bc_mp;
525 args.fsbno = cur->bc_private.b.firstblock;
526 args.firstblock = args.fsbno;
528 if (args.fsbno == NULLFSBLOCK) {
/* No allocation yet in this transaction: aim near the start hint. */
529 args.fsbno = be64_to_cpu(start->l);
530 args.type = XFS_ALLOCTYPE_START_BNO;
532 * Make sure there is sufficient room left in the AG to
533 * complete a full tree split for an extent insert. If
534 * we are converting the middle part of an extent then
535 * we may need space for two tree splits.
537 * We are relying on the caller to make the correct block
538 * reservation for this operation to succeed. If the
539 * reservation amount is insufficient then we may fail a
540 * block allocation here and corrupt the filesystem.
542 args.minleft = xfs_trans_get_block_res(args.tp);
543 } else if (cur->bc_private.b.flist->xbf_low) {
544 args.type = XFS_ALLOCTYPE_START_BNO;
546 args.type = XFS_ALLOCTYPE_NEAR_BNO;
549 args.minlen = args.maxlen = args.prod = 1;
550 args.wasdel = cur->bc_private.b.flags & XFS_BTCUR_BPRV_WASDEL;
/* Non-delayed allocation with no block reservation: refuse up front. */
551 if (!args.wasdel && xfs_trans_get_block_res(args.tp) == 0) {
552 error = XFS_ERROR(ENOSPC);
555 error = xfs_alloc_vextent(&args);
559 if (args.fsbno == NULLFSBLOCK && args.minleft) {
561 * Could not find an AG with enough free space to satisfy
562 * a full btree split. Try again without minleft and if
563 * successful activate the lowspace algorithm.
566 args.type = XFS_ALLOCTYPE_FIRST_AG;
568 error = xfs_alloc_vextent(&args);
571 cur->bc_private.b.flist->xbf_low = 1;
573 if (args.fsbno == NULLFSBLOCK) {
574 XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
578 ASSERT(args.len == 1);
/* Success: account the block against cursor, inode, and quota. */
579 cur->bc_private.b.firstblock = args.fsbno;
580 cur->bc_private.b.allocated++;
581 cur->bc_private.b.ip->i_d.di_nblocks++;
582 xfs_trans_log_inode(args.tp, cur->bc_private.b.ip, XFS_ILOG_CORE);
583 xfs_trans_mod_dquot_byino(args.tp, cur->bc_private.b.ip,
584 XFS_TRANS_DQ_BCOUNT, 1L);
586 new->l = cpu_to_be64(args.fsbno);
588 XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
593 XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
/*
 * Free a bmap btree block: queue the block on the transaction's free
 * list, decrement the inode's block count and quota, log the inode
 * core, and invalidate the buffer.
 * NOTE(review): the function name line and the buffer parameter line
 * are missing from this excerpt.
 */
599 struct xfs_btree_cur *cur,
602 struct xfs_mount *mp = cur->bc_mp;
603 struct xfs_inode *ip = cur->bc_private.b.ip;
604 struct xfs_trans *tp = cur->bc_tp;
605 xfs_fsblock_t fsbno = XFS_DADDR_TO_FSB(mp, XFS_BUF_ADDR(bp));
607 xfs_bmap_add_free(fsbno, 1, cur->bc_private.b.flist, mp);
608 ip->i_d.di_nblocks--;
610 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
611 xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, -1L);
612 xfs_trans_binval(tp, bp);
/*
 * Minimum records per block at a given level.  The root lives in the
 * inode fork, so its minimum is half of the fork-sized maximum; all
 * other levels use the precomputed per-mount table.
 */
617 xfs_bmbt_get_minrecs(
618 struct xfs_btree_cur *cur,
621 if (level == cur->bc_nlevels - 1) {
622 struct xfs_ifork *ifp;
624 ifp = XFS_IFORK_PTR(cur->bc_private.b.ip,
625 cur->bc_private.b.whichfork);
627 return xfs_bmbt_maxrecs(cur->bc_mp,
628 ifp->if_broot_bytes, level == 0) / 2;
631 return cur->bc_mp->m_bmap_dmnr[level != 0];
/*
 * Maximum records per block at a given level.  The root level is sized
 * by the in-core broot buffer; other levels use the per-mount table.
 */
635 xfs_bmbt_get_maxrecs(
636 struct xfs_btree_cur *cur,
639 if (level == cur->bc_nlevels - 1) {
640 struct xfs_ifork *ifp;
642 ifp = XFS_IFORK_PTR(cur->bc_private.b.ip,
643 cur->bc_private.b.whichfork);
645 return xfs_bmbt_maxrecs(cur->bc_mp,
646 ifp->if_broot_bytes, level == 0);
649 return cur->bc_mp->m_bmap_dmxr[level != 0];
/*
 * For the root level this sizes against the dinode fork (forksize)
 * rather than the in-core broot, via xfs_bmdr_maxrecs().
 * NOTE(review): the final argument of the xfs_bmdr_maxrecs() call is on
 * a missing line in this excerpt.
 */
654 * Get the maximum records we could store in the on-disk format.
656 * For non-root nodes this is equivalent to xfs_bmbt_get_maxrecs, but
657 * for the root node this checks the available space in the dinode fork
658 * so that we can resize the in-memory buffer to match it. After a
659 * resize to the maximum size this function returns the same value
660 * as xfs_bmbt_get_maxrecs for the root node, too.
663 xfs_bmbt_get_dmaxrecs(
664 struct xfs_btree_cur *cur,
667 if (level != cur->bc_nlevels - 1)
668 return cur->bc_mp->m_bmap_dmxr[level != 0];
669 return xfs_bmdr_maxrecs(cur->bc_mp, cur->bc_private.b.forksize,
/*
 * Build a btree key from a record: the key is just the record's
 * startoff, stored big-endian.
 */
674 xfs_bmbt_init_key_from_rec(
675 union xfs_btree_key *key,
676 union xfs_btree_rec *rec)
678 key->bmbt.br_startoff =
679 cpu_to_be64(xfs_bmbt_disk_get_startoff(&rec->bmbt));
/*
 * Build a (partial) record from a key: only startoff is meaningful.
 * NOTE(review): the remaining arguments of the xfs_bmbt_disk_set_allf()
 * call are on a missing line in this excerpt.
 */
683 xfs_bmbt_init_rec_from_key(
684 union xfs_btree_key *key,
685 union xfs_btree_rec *rec)
687 ASSERT(key->bmbt.br_startoff != 0);
689 xfs_bmbt_disk_set_allf(&rec->bmbt, be64_to_cpu(key->bmbt.br_startoff),
/*
 * Pack the cursor's in-core irec (bc_rec.b) into an on-disk record.
 */
694 xfs_bmbt_init_rec_from_cur(
695 struct xfs_btree_cur *cur,
696 union xfs_btree_rec *rec)
698 xfs_bmbt_disk_set_all(&rec->bmbt, &cur->bc_rec.b);
/*
 * NOTE(review): the body of this function is entirely missing from this
 * excerpt — only the signature survives.  The bmap btree root lives in
 * the inode, so presumably the pointer is zeroed here; confirm against
 * the full source.
 */
702 xfs_bmbt_init_ptr_from_cur(
703 struct xfs_btree_cur *cur,
704 union xfs_btree_ptr *ptr)
/*
 * Signed difference between a key's startoff and the cursor record's
 * startoff, used by the generic btree lookup to binary-search.
 * NOTE(review): the return-type and name lines are missing from this
 * excerpt.
 */
711 struct xfs_btree_cur *cur,
712 union xfs_btree_key *key)
714 return (__int64_t)be64_to_cpu(key->bmbt.br_startoff) -
715 cur->bc_rec.b.br_startoff;
/*
 * Debug ordering check: keys are in order iff k1's startoff is strictly
 * less than k2's.
 */
720 xfs_bmbt_keys_inorder(
721 struct xfs_btree_cur *cur,
722 union xfs_btree_key *k1,
723 union xfs_btree_key *k2)
725 return be64_to_cpu(k1->bmbt.br_startoff) <
726 be64_to_cpu(k2->bmbt.br_startoff);
/*
 * Debug ordering check: records are in order iff r1 ends at or before
 * r2 starts (extents must not overlap).
 */
730 xfs_bmbt_recs_inorder(
731 struct xfs_btree_cur *cur,
732 union xfs_btree_rec *r1,
733 union xfs_btree_rec *r2)
735 return xfs_bmbt_disk_get_startoff(&r1->bmbt) +
736 xfs_bmbt_disk_get_blockcount(&r1->bmbt) <=
737 xfs_bmbt_disk_get_startoff(&r2->bmbt);
741 #ifdef XFS_BTREE_TRACE
/* Global trace buffer for bmap btree events (trace builds only). */
742 ktrace_t *xfs_bmbt_trace_buf;
/*
 * Record one trace event twice: once in the global bmbt buffer and once
 * in the inode's private trace buffer.  Type, fork, and source line are
 * packed into the first word.
 * NOTE(review): most of the parameter list (func/type/line/a0..a10) is
 * missing from this excerpt.
 */
745 xfs_bmbt_trace_enter(
746 struct xfs_btree_cur *cur,
763 struct xfs_inode *ip = cur->bc_private.b.ip;
764 int whichfork = cur->bc_private.b.whichfork;
766 ktrace_enter(xfs_bmbt_trace_buf,
767 (void *)((__psint_t)type | (whichfork << 8) | (line << 16)),
768 (void *)func, (void *)s, (void *)ip, (void *)cur,
769 (void *)a0, (void *)a1, (void *)a2, (void *)a3,
770 (void *)a4, (void *)a5, (void *)a6, (void *)a7,
771 (void *)a8, (void *)a9, (void *)a10);
772 ktrace_enter(ip->i_btrace,
773 (void *)((__psint_t)type | (whichfork << 8) | (line << 16)),
774 (void *)func, (void *)s, (void *)ip, (void *)cur,
775 (void *)a0, (void *)a1, (void *)a2, (void *)a3,
776 (void *)a4, (void *)a5, (void *)a6, (void *)a7,
777 (void *)a8, (void *)a9, (void *)a10);
/*
 * Summarize cursor state for tracing: s0 packs nlevels, private flags,
 * and the allocated count; the cursor's current record is packed via
 * xfs_bmbt_set_all().
 * NOTE(review): the s0/l0/l1 out-parameter lines and the l0/l1
 * assignments are missing from this excerpt.
 */
781 xfs_bmbt_trace_cursor(
782 struct xfs_btree_cur *cur,
787 struct xfs_bmbt_rec_host r;
789 xfs_bmbt_set_all(&r, &cur->bc_rec.b);
791 *s0 = (cur->bc_nlevels << 24) |
792 (cur->bc_private.b.flags << 16) |
793 cur->bc_private.b.allocated;
/*
 * xfs_bmbt_trace_key() fragment: expose the key's startoff for tracing.
 * NOTE(review): the name line, the l0/l1 parameter lines, and the *l1
 * assignment are missing from this excerpt.
 */
800 struct xfs_btree_cur *cur,
801 union xfs_btree_key *key,
805 *l0 = be64_to_cpu(key->bmbt.br_startoff);
/*
 * Unpack an on-disk (big-endian, possibly unaligned) extent record by
 * byte-swapping both words and delegating to __xfs_bmbt_get_all().
 */
809 /* Endian flipping versions of the bmbt extraction functions */
811 xfs_bmbt_disk_get_all(
815 __xfs_bmbt_get_all(get_unaligned_be64(&r->l0),
816 get_unaligned_be64(&r->l1), s);
/*
 * Expose a record's three fields (startoff, startblock, blockcount) for
 * tracing by unpacking it into an irec.
 * NOTE(review): the l0/l1/l2 out-parameter lines are missing from this
 * excerpt.
 */
820 xfs_bmbt_trace_record(
821 struct xfs_btree_cur *cur,
822 union xfs_btree_rec *rec,
827 struct xfs_bmbt_irec irec;
829 xfs_bmbt_disk_get_all(&rec->bmbt, &irec);
830 *l0 = irec.br_startoff;
831 *l1 = irec.br_startblock;
832 *l2 = irec.br_blockcount;
834 #endif /* XFS_BTREE_TRACE */
/*
 * Operations vector plugged into the generic btree code for the bmap
 * btree.  Trace hooks are only present on XFS_BTREE_TRACE builds.
 * NOTE(review): the entry following .key_diff (original line 852-853)
 * and the closing brace are missing from this excerpt.
 */
836 static const struct xfs_btree_ops xfs_bmbt_ops = {
837 .rec_len = sizeof(xfs_bmbt_rec_t),
838 .key_len = sizeof(xfs_bmbt_key_t),
840 .dup_cursor = xfs_bmbt_dup_cursor,
841 .update_cursor = xfs_bmbt_update_cursor,
842 .alloc_block = xfs_bmbt_alloc_block,
843 .free_block = xfs_bmbt_free_block,
844 .get_maxrecs = xfs_bmbt_get_maxrecs,
845 .get_minrecs = xfs_bmbt_get_minrecs,
846 .get_dmaxrecs = xfs_bmbt_get_dmaxrecs,
847 .init_key_from_rec = xfs_bmbt_init_key_from_rec,
848 .init_rec_from_key = xfs_bmbt_init_rec_from_key,
849 .init_rec_from_cur = xfs_bmbt_init_rec_from_cur,
850 .init_ptr_from_cur = xfs_bmbt_init_ptr_from_cur,
851 .key_diff = xfs_bmbt_key_diff,
854 .keys_inorder = xfs_bmbt_keys_inorder,
855 .recs_inorder = xfs_bmbt_recs_inorder,
858 #ifdef XFS_BTREE_TRACE
859 .trace_enter = xfs_bmbt_trace_enter,
860 .trace_cursor = xfs_bmbt_trace_cursor,
861 .trace_key = xfs_bmbt_trace_key,
862 .trace_record = xfs_bmbt_trace_record,
/*
 * Allocate and initialize a bmap btree cursor for one fork of an inode.
 * Bmap btrees use long (64-bit) pointers and keep their root inside the
 * inode fork, hence the two bc_flags bits.
 * NOTE(review): the assignments of cur->bc_tp/bc_mp (original lines
 * 881-882) and the return statement are missing from this excerpt.
 */
867 * Allocate a new bmap btree cursor.
869 struct xfs_btree_cur * /* new bmap btree cursor */
870 xfs_bmbt_init_cursor(
871 struct xfs_mount *mp, /* file system mount point */
872 struct xfs_trans *tp, /* transaction pointer */
873 struct xfs_inode *ip, /* inode owning the btree */
874 int whichfork) /* data or attr fork */
876 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
877 struct xfs_btree_cur *cur;
879 cur = kmem_zone_zalloc(xfs_btree_cur_zone, KM_SLEEP);
/* Tree height = root level + 1. */
883 cur->bc_nlevels = be16_to_cpu(ifp->if_broot->bb_level) + 1;
884 cur->bc_btnum = XFS_BTNUM_BMAP;
885 cur->bc_blocklog = mp->m_sb.sb_blocklog;
887 cur->bc_ops = &xfs_bmbt_ops;
888 cur->bc_flags = XFS_BTREE_LONG_PTRS | XFS_BTREE_ROOT_IN_INODE;
890 cur->bc_private.b.forksize = XFS_IFORK_SIZE(ip, whichfork);
891 cur->bc_private.b.ip = ip;
892 cur->bc_private.b.firstblock = NULLFSBLOCK;
893 cur->bc_private.b.flist = NULL;
894 cur->bc_private.b.allocated = 0;
895 cur->bc_private.b.flags = 0;
896 cur->bc_private.b.whichfork = whichfork;
/*
 * Records that fit in one bmap btree block of the given size: leaves
 * hold records, interior nodes hold key+pointer pairs.
 * NOTE(review): the parameter lines after mp (blocklen, leaf) and the
 * leaf-test `if` line are missing from this excerpt.
 */
902 * Calculate number of records in a bmap btree block.
906 struct xfs_mount *mp,
910 blocklen -= XFS_BMBT_BLOCK_LEN(mp);
913 return blocklen / sizeof(xfs_bmbt_rec_t);
914 return blocklen / (sizeof(xfs_bmbt_key_t) + sizeof(xfs_bmbt_ptr_t));
/*
 * Same as xfs_bmbt_maxrecs() but for the compact on-disk root inside
 * the dinode fork, using the bmdr header/record/key/pointer sizes.
 * NOTE(review): the parameter lines after mp and the leaf-test `if`
 * line are missing from this excerpt, and the view ends here — the
 * closing brace is not visible.
 */
918 * Calculate number of records in a bmap btree inode root.
922 struct xfs_mount *mp,
926 blocklen -= sizeof(xfs_bmdr_block_t);
929 return blocklen / sizeof(xfs_bmdr_rec_t);
930 return blocklen / (sizeof(xfs_bmdr_key_t) + sizeof(xfs_bmdr_ptr_t));