2 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
3 * Copyright (c) 2013 Red Hat, Inc.
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License as
8 * published by the Free Software Foundation.
10 * This program is distributed in the hope that it would be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
21 #include "xfs_shared.h"
22 #include "xfs_format.h"
23 #include "xfs_log_format.h"
24 #include "xfs_trans_resv.h"
28 #include "xfs_mount.h"
29 #include "xfs_da_format.h"
30 #include "xfs_da_btree.h"
31 #include "xfs_bmap_btree.h"
33 #include "xfs_dir2_priv.h"
34 #include "xfs_dinode.h"
35 #include "xfs_inode.h"
36 #include "xfs_trans.h"
37 #include "xfs_inode_item.h"
38 #include "xfs_alloc.h"
41 #include "xfs_attr_leaf.h"
42 #include "xfs_error.h"
43 #include "xfs_trace.h"
44 #include "xfs_cksum.h"
45 #include "xfs_buf_item.h"
50 * Routines to implement directories as Btrees of hashed names.
53 /*========================================================================
54 * Function prototypes for the kernel.
55 *========================================================================*/
58 * Routines used for growing the Btree.
60 STATIC int xfs_da3_root_split(xfs_da_state_t *state,
61 xfs_da_state_blk_t *existing_root,
62 xfs_da_state_blk_t *new_child);
63 STATIC int xfs_da3_node_split(xfs_da_state_t *state,
64 xfs_da_state_blk_t *existing_blk,
65 xfs_da_state_blk_t *split_blk,
66 xfs_da_state_blk_t *blk_to_add,
69 STATIC void xfs_da3_node_rebalance(xfs_da_state_t *state,
70 xfs_da_state_blk_t *node_blk_1,
71 xfs_da_state_blk_t *node_blk_2);
72 STATIC void xfs_da3_node_add(xfs_da_state_t *state,
73 xfs_da_state_blk_t *old_node_blk,
74 xfs_da_state_blk_t *new_node_blk);
77 * Routines used for shrinking the Btree.
79 STATIC int xfs_da3_root_join(xfs_da_state_t *state,
80 xfs_da_state_blk_t *root_blk);
81 STATIC int xfs_da3_node_toosmall(xfs_da_state_t *state, int *retval);
82 STATIC void xfs_da3_node_remove(xfs_da_state_t *state,
83 xfs_da_state_blk_t *drop_blk);
84 STATIC void xfs_da3_node_unbalance(xfs_da_state_t *state,
85 xfs_da_state_blk_t *src_node_blk,
86 xfs_da_state_blk_t *dst_node_blk);
91 STATIC int xfs_da3_blk_unlink(xfs_da_state_t *state,
92 xfs_da_state_blk_t *drop_blk,
93 xfs_da_state_blk_t *save_blk);
96 kmem_zone_t *xfs_da_state_zone; /* anchor for state struct zone */
99 * Allocate a dir-state structure.
100 * We don't put them on the stack since they're large.
103 xfs_da_state_alloc(void)
105 return kmem_zone_zalloc(xfs_da_state_zone, KM_NOFS);
109 * Kill the altpath contents of a da-state structure.
112 xfs_da_state_kill_altpath(xfs_da_state_t *state)
116 for (i = 0; i < state->altpath.active; i++)
117 state->altpath.blk[i].bp = NULL;
118 state->altpath.active = 0;
122 * Free a da-state structure.
125 xfs_da_state_free(xfs_da_state_t *state)
127 xfs_da_state_kill_altpath(state);
129 memset((char *)state, 0, sizeof(*state));
131 kmem_zone_free(xfs_da_state_zone, state);
135 xfs_da3_node_hdr_from_disk(
136 struct xfs_da3_icnode_hdr *to,
137 struct xfs_da_intnode *from)
139 ASSERT(from->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC) ||
140 from->hdr.info.magic == cpu_to_be16(XFS_DA3_NODE_MAGIC));
142 if (from->hdr.info.magic == cpu_to_be16(XFS_DA3_NODE_MAGIC)) {
143 struct xfs_da3_node_hdr *hdr3 = (struct xfs_da3_node_hdr *)from;
145 to->forw = be32_to_cpu(hdr3->info.hdr.forw);
146 to->back = be32_to_cpu(hdr3->info.hdr.back);
147 to->magic = be16_to_cpu(hdr3->info.hdr.magic);
148 to->count = be16_to_cpu(hdr3->__count);
149 to->level = be16_to_cpu(hdr3->__level);
152 to->forw = be32_to_cpu(from->hdr.info.forw);
153 to->back = be32_to_cpu(from->hdr.info.back);
154 to->magic = be16_to_cpu(from->hdr.info.magic);
155 to->count = be16_to_cpu(from->hdr.__count);
156 to->level = be16_to_cpu(from->hdr.__level);
160 xfs_da3_node_hdr_to_disk(
161 struct xfs_da_intnode *to,
162 struct xfs_da3_icnode_hdr *from)
164 ASSERT(from->magic == XFS_DA_NODE_MAGIC ||
165 from->magic == XFS_DA3_NODE_MAGIC);
167 if (from->magic == XFS_DA3_NODE_MAGIC) {
168 struct xfs_da3_node_hdr *hdr3 = (struct xfs_da3_node_hdr *)to;
170 hdr3->info.hdr.forw = cpu_to_be32(from->forw);
171 hdr3->info.hdr.back = cpu_to_be32(from->back);
172 hdr3->info.hdr.magic = cpu_to_be16(from->magic);
173 hdr3->__count = cpu_to_be16(from->count);
174 hdr3->__level = cpu_to_be16(from->level);
177 to->hdr.info.forw = cpu_to_be32(from->forw);
178 to->hdr.info.back = cpu_to_be32(from->back);
179 to->hdr.info.magic = cpu_to_be16(from->magic);
180 to->hdr.__count = cpu_to_be16(from->count);
181 to->hdr.__level = cpu_to_be16(from->level);
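/*
 * Sketch (not kernel code): the rough on-disk layouts that the two helpers
 * above translate to and from, paraphrased from xfs_da_format.h with the
 * endian-annotated types approximated by fixed-width integers (all on-disk
 * fields are big-endian).  The struct and field names below are stand-ins.
 * The point is that a v3 block info begins with the same forw/back/magic
 * fields as a v2 one, which is why the code can read the magic first and
 * only then decide which layout it is looking at.
 */
#include <stdint.h>

struct da_blkinfo_v2_model {		/* models xfs_da_blkinfo */
	uint32_t forw;			/* next sibling block */
	uint32_t back;			/* previous sibling block */
	uint16_t magic;			/* block type, e.g. XFS_DA_NODE_MAGIC */
	uint16_t pad;
};

struct da_blkinfo_v3_model {		/* models xfs_da3_blkinfo */
	struct da_blkinfo_v2_model hdr;	/* same leading fields as v2 */
	uint32_t crc;			/* CRC of the whole block */
	uint64_t blkno;			/* daddr this block lives at */
	uint64_t lsn;			/* LSN of the last write */
	uint8_t  uuid[16];		/* filesystem UUID */
	uint64_t owner;			/* inode that owns the block */
};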
188 struct xfs_mount *mp = bp->b_target->bt_mount;
189 struct xfs_da_intnode *hdr = bp->b_addr;
190 struct xfs_da3_icnode_hdr ichdr;
192 xfs_da3_node_hdr_from_disk(&ichdr, hdr);
194 if (xfs_sb_version_hascrc(&mp->m_sb)) {
195 struct xfs_da3_node_hdr *hdr3 = bp->b_addr;
197 if (ichdr.magic != XFS_DA3_NODE_MAGIC)
200 if (!uuid_equal(&hdr3->info.uuid, &mp->m_sb.sb_uuid))
202 if (be64_to_cpu(hdr3->info.blkno) != bp->b_bn)
205 if (ichdr.magic != XFS_DA_NODE_MAGIC)
208 if (ichdr.level == 0)
210 if (ichdr.level > XFS_DA_NODE_MAXDEPTH)
212 if (ichdr.count == 0)
216 * we don't know if the node is for an attribute or directory tree,
217 * so only fail if the count is outside both bounds
219 if (ichdr.count > mp->m_dir_node_ents &&
220 ichdr.count > mp->m_attr_node_ents)
223 /* XXX: hash order check? */
229 xfs_da3_node_write_verify(
232 struct xfs_mount *mp = bp->b_target->bt_mount;
233 struct xfs_buf_log_item *bip = bp->b_fspriv;
234 struct xfs_da3_node_hdr *hdr3 = bp->b_addr;
236 if (!xfs_da3_node_verify(bp)) {
237 XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, bp->b_addr);
238 xfs_buf_ioerror(bp, EFSCORRUPTED);
242 if (!xfs_sb_version_hascrc(&mp->m_sb))
246 hdr3->info.lsn = cpu_to_be64(bip->bli_item.li_lsn);
248 xfs_update_cksum(bp->b_addr, BBTOB(bp->b_length), XFS_DA3_NODE_CRC_OFF);
252 * leaf/node format detection on trees is sketchy, so a node read can be done on
253 * leaf level blocks when detection identifies the tree as a node format tree
254 * incorrectly. In this case, we need to swap the verifier to match the correct
255 * format of the block being read.
258 xfs_da3_node_read_verify(
261 struct xfs_mount *mp = bp->b_target->bt_mount;
262 struct xfs_da_blkinfo *info = bp->b_addr;
264 switch (be16_to_cpu(info->magic)) {
265 case XFS_DA3_NODE_MAGIC:
266 if (!xfs_verify_cksum(bp->b_addr, BBTOB(bp->b_length),
267 XFS_DA3_NODE_CRC_OFF))
270 case XFS_DA_NODE_MAGIC:
271 if (!xfs_da3_node_verify(bp))
274 case XFS_ATTR_LEAF_MAGIC:
275 case XFS_ATTR3_LEAF_MAGIC:
276 bp->b_ops = &xfs_attr3_leaf_buf_ops;
277 bp->b_ops->verify_read(bp);
279 case XFS_DIR2_LEAFN_MAGIC:
280 case XFS_DIR3_LEAFN_MAGIC:
281 bp->b_ops = &xfs_dir3_leafn_buf_ops;
282 bp->b_ops->verify_read(bp);
289 XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, bp->b_addr);
290 xfs_buf_ioerror(bp, EFSCORRUPTED);
293 const struct xfs_buf_ops xfs_da3_node_buf_ops = {
294 .verify_read = xfs_da3_node_read_verify,
295 .verify_write = xfs_da3_node_write_verify,
300 struct xfs_trans *tp,
301 struct xfs_inode *dp,
303 xfs_daddr_t mappedbno,
304 struct xfs_buf **bpp,
309 err = xfs_da_read_buf(tp, dp, bno, mappedbno, bpp,
310 which_fork, &xfs_da3_node_buf_ops);
312 struct xfs_da_blkinfo *info = (*bpp)->b_addr;
315 switch (be16_to_cpu(info->magic)) {
316 case XFS_DA_NODE_MAGIC:
317 case XFS_DA3_NODE_MAGIC:
318 type = XFS_BLFT_DA_NODE_BUF;
320 case XFS_ATTR_LEAF_MAGIC:
321 case XFS_ATTR3_LEAF_MAGIC:
322 type = XFS_BLFT_ATTR_LEAF_BUF;
324 case XFS_DIR2_LEAFN_MAGIC:
325 case XFS_DIR3_LEAFN_MAGIC:
326 type = XFS_BLFT_DIR_LEAFN_BUF;
333 xfs_trans_buf_set_type(tp, *bpp, type);
338 /*========================================================================
339 * Routines used for growing the Btree.
340 *========================================================================*/
343 * Create the initial contents of an intermediate node.
347 struct xfs_da_args *args,
350 struct xfs_buf **bpp,
353 struct xfs_da_intnode *node;
354 struct xfs_trans *tp = args->trans;
355 struct xfs_mount *mp = tp->t_mountp;
356 struct xfs_da3_icnode_hdr ichdr = {0};
360 trace_xfs_da_node_create(args);
361 ASSERT(level <= XFS_DA_NODE_MAXDEPTH);
363 error = xfs_da_get_buf(tp, args->dp, blkno, -1, &bp, whichfork);
366 bp->b_ops = &xfs_da3_node_buf_ops;
367 xfs_trans_buf_set_type(tp, bp, XFS_BLFT_DA_NODE_BUF);
370 if (xfs_sb_version_hascrc(&mp->m_sb)) {
371 struct xfs_da3_node_hdr *hdr3 = bp->b_addr;
373 ichdr.magic = XFS_DA3_NODE_MAGIC;
374 hdr3->info.blkno = cpu_to_be64(bp->b_bn);
375 hdr3->info.owner = cpu_to_be64(args->dp->i_ino);
376 uuid_copy(&hdr3->info.uuid, &mp->m_sb.sb_uuid);
378 ichdr.magic = XFS_DA_NODE_MAGIC;
382 xfs_da3_node_hdr_to_disk(node, &ichdr);
383 xfs_trans_log_buf(tp, bp,
384 XFS_DA_LOGRANGE(node, &node->hdr, xfs_da3_node_hdr_size(node)));
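/*
 * Sketch (not kernel code): what the XFS_DA_LOGRANGE() macro used in the
 * call above (and throughout this file) boils down to, paraphrased with
 * stand-in names.  It converts "this field inside the block" into the
 * first/last byte offsets that xfs_trans_log_buf() takes, so only the bytes
 * that were actually modified get logged with the transaction.
 */
#define DA_LOGOFF_MODEL(base, addr) \
	((char *)(addr) - (char *)(base))
#define DA_LOGRANGE_MODEL(base, addr, size) \
	(unsigned int)DA_LOGOFF_MODEL(base, addr), \
	(unsigned int)(DA_LOGOFF_MODEL(base, addr) + (size) - 1)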
391 * Split a leaf node, rebalance, then possibly split
392 * intermediate nodes, rebalance, etc.
396 struct xfs_da_state *state)
398 struct xfs_da_state_blk *oldblk;
399 struct xfs_da_state_blk *newblk;
400 struct xfs_da_state_blk *addblk;
401 struct xfs_da_intnode *node;
408 trace_xfs_da_split(state->args);
411 * Walk back up the tree splitting/inserting/adjusting as necessary.
412 * If we need to insert and there isn't room, split the node, then
413 * decide which fragment to insert the new block from below into.
414 * Note that we may split the root this way, but we need more fixup.
416 max = state->path.active - 1;
417 ASSERT((max >= 0) && (max < XFS_DA_NODE_MAXDEPTH));
418 ASSERT(state->path.blk[max].magic == XFS_ATTR_LEAF_MAGIC ||
419 state->path.blk[max].magic == XFS_DIR2_LEAFN_MAGIC);
421 addblk = &state->path.blk[max]; /* initial dummy value */
422 for (i = max; (i >= 0) && addblk; state->path.active--, i--) {
423 oldblk = &state->path.blk[i];
424 newblk = &state->altpath.blk[i];
427 * If a leaf node then
428 *     Allocate a new leaf node, then rebalance across them.
429 * else if an intermediate node then
430 *     We split on the last layer, must we split the node?
432 switch (oldblk->magic) {
433 case XFS_ATTR_LEAF_MAGIC:
434 error = xfs_attr3_leaf_split(state, oldblk, newblk);
435 if ((error != 0) && (error != ENOSPC)) {
436 return(error); /* GROT: attr is inconsistent */
443 * Entry wouldn't fit, split the leaf again.
445 state->extravalid = 1;
447 state->extraafter = 0; /* before newblk */
448 trace_xfs_attr_leaf_split_before(state->args);
449 error = xfs_attr3_leaf_split(state, oldblk,
452 state->extraafter = 1; /* after newblk */
453 trace_xfs_attr_leaf_split_after(state->args);
454 error = xfs_attr3_leaf_split(state, newblk,
458 return(error); /* GROT: attr inconsistent */
461 case XFS_DIR2_LEAFN_MAGIC:
462 error = xfs_dir2_leafn_split(state, oldblk, newblk);
467 case XFS_DA_NODE_MAGIC:
468 error = xfs_da3_node_split(state, oldblk, newblk, addblk,
472 return(error); /* GROT: dir is inconsistent */
474 * Record the newly split block for the next time thru?
484 * Update the btree to show the new hashval for this child.
486 xfs_da3_fixhashpath(state, &state->path);
492 * Split the root node.
494 ASSERT(state->path.active == 0);
495 oldblk = &state->path.blk[0];
496 error = xfs_da3_root_split(state, oldblk, addblk);
499 return(error); /* GROT: dir is inconsistent */
503 * Update pointers to the node which used to be block 0 and
504 * just got bumped because of the addition of a new root node.
505 * There might be three blocks involved if a double split occurred,
506 * and the original block 0 could be at any position in the list.
508 * Note: the magic numbers and sibling pointers are in the same
509 * physical place for both v2 and v3 headers (by design). Hence it
510 * doesn't matter which version of the xfs_da_intnode structure we use
511 * here as the result will be the same using either structure.
513 node = oldblk->bp->b_addr;
514 if (node->hdr.info.forw) {
515 if (be32_to_cpu(node->hdr.info.forw) == addblk->blkno) {
518 ASSERT(state->extravalid);
519 bp = state->extrablk.bp;
522 node->hdr.info.back = cpu_to_be32(oldblk->blkno);
523 xfs_trans_log_buf(state->args->trans, bp,
524 XFS_DA_LOGRANGE(node, &node->hdr.info,
525 sizeof(node->hdr.info)));
527 node = oldblk->bp->b_addr;
528 if (node->hdr.info.back) {
529 if (be32_to_cpu(node->hdr.info.back) == addblk->blkno) {
532 ASSERT(state->extravalid);
533 bp = state->extrablk.bp;
536 node->hdr.info.forw = cpu_to_be32(oldblk->blkno);
537 xfs_trans_log_buf(state->args->trans, bp,
538 XFS_DA_LOGRANGE(node, &node->hdr.info,
539 sizeof(node->hdr.info)));
546 * Split the root. We have to create a new root and point to the two
547 * parts (the split old root) that we just created. Copy block zero to
548 * the EOF, extending the inode in the process.
550 STATIC int /* error */
552 struct xfs_da_state *state,
553 struct xfs_da_state_blk *blk1,
554 struct xfs_da_state_blk *blk2)
556 struct xfs_da_intnode *node;
557 struct xfs_da_intnode *oldroot;
558 struct xfs_da_node_entry *btree;
559 struct xfs_da3_icnode_hdr nodehdr;
560 struct xfs_da_args *args;
562 struct xfs_inode *dp;
563 struct xfs_trans *tp;
564 struct xfs_mount *mp;
565 struct xfs_dir2_leaf *leaf;
571 trace_xfs_da_root_split(state->args);
574 * Copy the existing (incorrect) block from the root node position
575 * to a free space somewhere.
578 error = xfs_da_grow_inode(args, &blkno);
585 error = xfs_da_get_buf(tp, dp, blkno, -1, &bp, args->whichfork);
589 oldroot = blk1->bp->b_addr;
590 if (oldroot->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC) ||
591 oldroot->hdr.info.magic == cpu_to_be16(XFS_DA3_NODE_MAGIC)) {
592 struct xfs_da3_icnode_hdr nodehdr;
594 xfs_da3_node_hdr_from_disk(&nodehdr, oldroot);
595 btree = xfs_da3_node_tree_p(oldroot);
596 size = (int)((char *)&btree[nodehdr.count] - (char *)oldroot);
597 level = nodehdr.level;
600 * we are about to copy oldroot to bp, so set up the type
601 * of bp while we know exactly what it will be.
603 xfs_trans_buf_set_type(tp, bp, XFS_BLFT_DA_NODE_BUF);
605 struct xfs_dir3_icleaf_hdr leafhdr;
606 struct xfs_dir2_leaf_entry *ents;
608 leaf = (xfs_dir2_leaf_t *)oldroot;
609 xfs_dir3_leaf_hdr_from_disk(&leafhdr, leaf);
610 ents = xfs_dir3_leaf_ents_p(leaf);
612 ASSERT(leafhdr.magic == XFS_DIR2_LEAFN_MAGIC ||
613 leafhdr.magic == XFS_DIR3_LEAFN_MAGIC);
614 size = (int)((char *)&ents[leafhdr.count] - (char *)leaf);
618 * we are about to copy oldroot to bp, so set up the type
619 * of bp while we know exactly what it will be.
621 xfs_trans_buf_set_type(tp, bp, XFS_BLFT_DIR_LEAFN_BUF);
625 * we can copy most of the information in the node from one block to
626 * another, but for CRC enabled headers we have to make sure that the
627 * block specific identifiers are kept intact. We update the buffer directly for this.
630 memcpy(node, oldroot, size);
631 if (oldroot->hdr.info.magic == cpu_to_be16(XFS_DA3_NODE_MAGIC) ||
632 oldroot->hdr.info.magic == cpu_to_be16(XFS_DIR3_LEAFN_MAGIC)) {
633 struct xfs_da3_intnode *node3 = (struct xfs_da3_intnode *)node;
635 node3->hdr.info.blkno = cpu_to_be64(bp->b_bn);
637 xfs_trans_log_buf(tp, bp, 0, size - 1);
639 bp->b_ops = blk1->bp->b_ops;
640 xfs_trans_buf_copy_type(bp, blk1->bp);
645 * Set up the new root node.
647 error = xfs_da3_node_create(args,
648 (args->whichfork == XFS_DATA_FORK) ? mp->m_dirleafblk : 0,
649 level + 1, &bp, args->whichfork);
654 xfs_da3_node_hdr_from_disk(&nodehdr, node);
655 btree = xfs_da3_node_tree_p(node);
656 btree[0].hashval = cpu_to_be32(blk1->hashval);
657 btree[0].before = cpu_to_be32(blk1->blkno);
658 btree[1].hashval = cpu_to_be32(blk2->hashval);
659 btree[1].before = cpu_to_be32(blk2->blkno);
661 xfs_da3_node_hdr_to_disk(node, &nodehdr);
664 if (oldroot->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC) ||
665 oldroot->hdr.info.magic == cpu_to_be16(XFS_DIR3_LEAFN_MAGIC)) {
666 ASSERT(blk1->blkno >= mp->m_dirleafblk &&
667 blk1->blkno < mp->m_dirfreeblk);
668 ASSERT(blk2->blkno >= mp->m_dirleafblk &&
669 blk2->blkno < mp->m_dirfreeblk);
673 /* Header is already logged by xfs_da3_node_create */
674 xfs_trans_log_buf(tp, bp,
675 XFS_DA_LOGRANGE(node, btree, sizeof(xfs_da_node_entry_t) * 2));
681 * Split the node, rebalance, then add the new entry.
683 STATIC int /* error */
685 struct xfs_da_state *state,
686 struct xfs_da_state_blk *oldblk,
687 struct xfs_da_state_blk *newblk,
688 struct xfs_da_state_blk *addblk,
692 struct xfs_da_intnode *node;
693 struct xfs_da3_icnode_hdr nodehdr;
699 trace_xfs_da_node_split(state->args);
701 node = oldblk->bp->b_addr;
702 xfs_da3_node_hdr_from_disk(&nodehdr, node);
705 * With V2 dirs the extra block is data or freespace.
707 useextra = state->extravalid && state->args->whichfork == XFS_ATTR_FORK;
708 newcount = 1 + useextra;
710 * Do we have to split the node?
712 if (nodehdr.count + newcount > state->node_ents) {
714 * Allocate a new node, add to the doubly linked chain of
715 * nodes, then move some of our excess entries into it.
717 error = xfs_da_grow_inode(state->args, &blkno);
719 return(error); /* GROT: dir is inconsistent */
721 error = xfs_da3_node_create(state->args, blkno, treelevel,
722 &newblk->bp, state->args->whichfork);
724 return(error); /* GROT: dir is inconsistent */
725 newblk->blkno = blkno;
726 newblk->magic = XFS_DA_NODE_MAGIC;
727 xfs_da3_node_rebalance(state, oldblk, newblk);
728 error = xfs_da3_blk_link(state, oldblk, newblk);
737 * Insert the new entry(s) into the correct block
738 * (updating last hashval in the process).
740 * xfs_da3_node_add() inserts BEFORE the given index,
741 * and as a result of using node_lookup_int() we always
742 * point to a valid entry (not after one), but a split
743 * operation always results in a new block whose hashvals
744 * FOLLOW the current block.
746 * If we had double-split op below us, then add the extra block too.
748 node = oldblk->bp->b_addr;
749 xfs_da3_node_hdr_from_disk(&nodehdr, node);
750 if (oldblk->index <= nodehdr.count) {
752 xfs_da3_node_add(state, oldblk, addblk);
754 if (state->extraafter)
756 xfs_da3_node_add(state, oldblk, &state->extrablk);
757 state->extravalid = 0;
761 xfs_da3_node_add(state, newblk, addblk);
763 if (state->extraafter)
765 xfs_da3_node_add(state, newblk, &state->extrablk);
766 state->extravalid = 0;
774 * Balance the btree elements between two intermediate nodes,
775 * usually one full and one empty.
777 * NOTE: if blk2 is empty, then it will get the upper half of blk1.
780 xfs_da3_node_rebalance(
781 struct xfs_da_state *state,
782 struct xfs_da_state_blk *blk1,
783 struct xfs_da_state_blk *blk2)
785 struct xfs_da_intnode *node1;
786 struct xfs_da_intnode *node2;
787 struct xfs_da_intnode *tmpnode;
788 struct xfs_da_node_entry *btree1;
789 struct xfs_da_node_entry *btree2;
790 struct xfs_da_node_entry *btree_s;
791 struct xfs_da_node_entry *btree_d;
792 struct xfs_da3_icnode_hdr nodehdr1;
793 struct xfs_da3_icnode_hdr nodehdr2;
794 struct xfs_trans *tp;
799 trace_xfs_da_node_rebalance(state->args);
801 node1 = blk1->bp->b_addr;
802 node2 = blk2->bp->b_addr;
803 xfs_da3_node_hdr_from_disk(&nodehdr1, node1);
804 xfs_da3_node_hdr_from_disk(&nodehdr2, node2);
805 btree1 = xfs_da3_node_tree_p(node1);
806 btree2 = xfs_da3_node_tree_p(node2);
809 * Figure out how many entries need to move, and in which direction.
810 * Swap the nodes around if that makes it simpler.
812 if (nodehdr1.count > 0 && nodehdr2.count > 0 &&
813 ((be32_to_cpu(btree2[0].hashval) < be32_to_cpu(btree1[0].hashval)) ||
814 (be32_to_cpu(btree2[nodehdr2.count - 1].hashval) <
815 be32_to_cpu(btree1[nodehdr1.count - 1].hashval)))) {
819 xfs_da3_node_hdr_from_disk(&nodehdr1, node1);
820 xfs_da3_node_hdr_from_disk(&nodehdr2, node2);
821 btree1 = xfs_da3_node_tree_p(node1);
822 btree2 = xfs_da3_node_tree_p(node2);
826 count = (nodehdr1.count - nodehdr2.count) / 2;
829 tp = state->args->trans;
831 * Two cases: high-to-low and low-to-high.
835 * Move elements in node2 up to make a hole.
837 tmp = nodehdr2.count;
839 tmp *= (uint)sizeof(xfs_da_node_entry_t);
840 btree_s = &btree2[0];
841 btree_d = &btree2[count];
842 memmove(btree_d, btree_s, tmp);
846 * Move the req'd B-tree elements from high in node1 to low in node2.
849 nodehdr2.count += count;
850 tmp = count * (uint)sizeof(xfs_da_node_entry_t);
851 btree_s = &btree1[nodehdr1.count - count];
852 btree_d = &btree2[0];
853 memcpy(btree_d, btree_s, tmp);
854 nodehdr1.count -= count;
857 * Move the req'd B-tree elements from low in node2 to high in node1.
861 tmp = count * (uint)sizeof(xfs_da_node_entry_t);
862 btree_s = &btree2[0];
863 btree_d = &btree1[nodehdr1.count];
864 memcpy(btree_d, btree_s, tmp);
865 nodehdr1.count += count;
867 xfs_trans_log_buf(tp, blk1->bp,
868 XFS_DA_LOGRANGE(node1, btree_d, tmp));
871 * Move elements in node2 down to fill the hole.
873 tmp = nodehdr2.count - count;
874 tmp *= (uint)sizeof(xfs_da_node_entry_t);
875 btree_s = &btree2[count];
876 btree_d = &btree2[0];
877 memmove(btree_d, btree_s, tmp);
878 nodehdr2.count -= count;
882 * Log header of node 1 and all current bits of node 2.
884 xfs_da3_node_hdr_to_disk(node1, &nodehdr1);
885 xfs_trans_log_buf(tp, blk1->bp,
886 XFS_DA_LOGRANGE(node1, &node1->hdr,
887 xfs_da3_node_hdr_size(node1)));
889 xfs_da3_node_hdr_to_disk(node2, &nodehdr2);
890 xfs_trans_log_buf(tp, blk2->bp,
891 XFS_DA_LOGRANGE(node2, &node2->hdr,
892 xfs_da3_node_hdr_size(node2) +
893 (sizeof(btree2[0]) * nodehdr2.count)));
896 * Record the last hashval from each block for upward propagation.
897 * (note: don't use the swapped node pointers)
900 node1 = blk1->bp->b_addr;
901 node2 = blk2->bp->b_addr;
902 xfs_da3_node_hdr_from_disk(&nodehdr1, node1);
903 xfs_da3_node_hdr_from_disk(&nodehdr2, node2);
904 btree1 = xfs_da3_node_tree_p(node1);
905 btree2 = xfs_da3_node_tree_p(node2);
907 blk1->hashval = be32_to_cpu(btree1[nodehdr1.count - 1].hashval);
908 blk2->hashval = be32_to_cpu(btree2[nodehdr2.count - 1].hashval);
911 * Adjust the expected index for insertion.
913 if (blk1->index >= nodehdr1.count) {
914 blk2->index = blk1->index - nodehdr1.count;
915 blk1->index = nodehdr1.count + 1; /* make it invalid */
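/*
 * Sketch (not kernel code): the entry shuffle performed by the rebalance
 * above, modelled on plain arrays.  'n1'/'n2' and 'c1'/'c2' stand in for the
 * two nodes' btree arrays and their entry counts; half of the surplus moves
 * so the two nodes end up holding roughly equal numbers of entries.
 */
#include <string.h>

struct da_ent_model { unsigned int hashval; unsigned int before; };

static void rebalance_model(struct da_ent_model *n1, int *c1,
			    struct da_ent_model *n2, int *c2)
{
	int count = (*c1 - *c2) / 2;

	if (count > 0) {
		/* open a hole at the front of n2, copy the high end of n1 in */
		memmove(&n2[count], &n2[0], *c2 * sizeof(*n2));
		memcpy(&n2[0], &n1[*c1 - count], count * sizeof(*n1));
		*c1 -= count;
		*c2 += count;
	} else if (count < 0) {
		/* copy the low end of n2 onto n1's tail, then close the hole */
		count = -count;
		memcpy(&n1[*c1], &n2[0], count * sizeof(*n1));
		memmove(&n2[0], &n2[count], (*c2 - count) * sizeof(*n2));
		*c1 += count;
		*c2 -= count;
	}
}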
920 * Add a new entry to an intermediate node.
924 struct xfs_da_state *state,
925 struct xfs_da_state_blk *oldblk,
926 struct xfs_da_state_blk *newblk)
928 struct xfs_da_intnode *node;
929 struct xfs_da3_icnode_hdr nodehdr;
930 struct xfs_da_node_entry *btree;
933 trace_xfs_da_node_add(state->args);
935 node = oldblk->bp->b_addr;
936 xfs_da3_node_hdr_from_disk(&nodehdr, node);
937 btree = xfs_da3_node_tree_p(node);
939 ASSERT(oldblk->index >= 0 && oldblk->index <= nodehdr.count);
940 ASSERT(newblk->blkno != 0);
941 if (state->args->whichfork == XFS_DATA_FORK)
942 ASSERT(newblk->blkno >= state->mp->m_dirleafblk &&
943 newblk->blkno < state->mp->m_dirfreeblk);
946 * We may need to make some room before we insert the new node.
949 if (oldblk->index < nodehdr.count) {
950 tmp = (nodehdr.count - oldblk->index) * (uint)sizeof(*btree);
951 memmove(&btree[oldblk->index + 1], &btree[oldblk->index], tmp);
953 btree[oldblk->index].hashval = cpu_to_be32(newblk->hashval);
954 btree[oldblk->index].before = cpu_to_be32(newblk->blkno);
955 xfs_trans_log_buf(state->args->trans, oldblk->bp,
956 XFS_DA_LOGRANGE(node, &btree[oldblk->index],
957 tmp + sizeof(*btree)));
960 xfs_da3_node_hdr_to_disk(node, &nodehdr);
961 xfs_trans_log_buf(state->args->trans, oldblk->bp,
962 XFS_DA_LOGRANGE(node, &node->hdr, xfs_da3_node_hdr_size(node)));
965 * Copy the last hash value from the oldblk to propagate upwards.
967 oldblk->hashval = be32_to_cpu(btree[nodehdr.count - 1].hashval);
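/*
 * Sketch (not kernel code): the array insertion done by xfs_da3_node_add()
 * above, on a plain array.  Entries at and after the insertion index slide
 * up one slot and the new (hashval, before) pair is written into the hole;
 * 'ents', 'count' and 'index' are stand-ins for btree[], nodehdr.count and
 * oldblk->index.
 */
#include <string.h>

struct da_add_ent_model { unsigned int hashval; unsigned int before; };

static void node_add_model(struct da_add_ent_model *ents, int *count,
			   int index, unsigned int hashval,
			   unsigned int before)
{
	if (index < *count)
		memmove(&ents[index + 1], &ents[index],
			(*count - index) * sizeof(*ents));
	ents[index].hashval = hashval;
	ents[index].before = before;
	(*count)++;
}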
970 /*========================================================================
971 * Routines used for shrinking the Btree.
972 *========================================================================*/
975 * Deallocate an empty leaf node, remove it from its parent,
976 * possibly deallocating that block, etc...
980 struct xfs_da_state *state)
982 struct xfs_da_state_blk *drop_blk;
983 struct xfs_da_state_blk *save_blk;
987 trace_xfs_da_join(state->args);
989 drop_blk = &state->path.blk[ state->path.active-1 ];
990 save_blk = &state->altpath.blk[ state->path.active-1 ];
991 ASSERT(state->path.blk[0].magic == XFS_DA_NODE_MAGIC);
992 ASSERT(drop_blk->magic == XFS_ATTR_LEAF_MAGIC ||
993 drop_blk->magic == XFS_DIR2_LEAFN_MAGIC);
996 * Walk back up the tree joining/deallocating as necessary.
997 * When we stop dropping blocks, break out.
999 for ( ; state->path.active >= 2; drop_blk--, save_blk--,
1000 state->path.active--) {
1002 * See if we can combine the block with a neighbor.
1003 * (action == 0) => no options, just leave
1004 * (action == 1) => coalesce, then unlink
1005 * (action == 2) => block empty, unlink it
1007 switch (drop_blk->magic) {
1008 case XFS_ATTR_LEAF_MAGIC:
1009 error = xfs_attr3_leaf_toosmall(state, &action);
1014 xfs_attr3_leaf_unbalance(state, drop_blk, save_blk);
1016 case XFS_DIR2_LEAFN_MAGIC:
1017 error = xfs_dir2_leafn_toosmall(state, &action);
1022 xfs_dir2_leafn_unbalance(state, drop_blk, save_blk);
1024 case XFS_DA_NODE_MAGIC:
1026 * Remove the offending node, fixup hashvals,
1027 * check for a toosmall neighbor.
1029 xfs_da3_node_remove(state, drop_blk);
1030 xfs_da3_fixhashpath(state, &state->path);
1031 error = xfs_da3_node_toosmall(state, &action);
1036 xfs_da3_node_unbalance(state, drop_blk, save_blk);
1039 xfs_da3_fixhashpath(state, &state->altpath);
1040 error = xfs_da3_blk_unlink(state, drop_blk, save_blk);
1041 xfs_da_state_kill_altpath(state);
1044 error = xfs_da_shrink_inode(state->args, drop_blk->blkno,
1046 drop_blk->bp = NULL;
1051 * We joined all the way to the top. If it turns out that
1052 * we only have one entry in the root, make the child block the new root.
1055 xfs_da3_node_remove(state, drop_blk);
1056 xfs_da3_fixhashpath(state, &state->path);
1057 error = xfs_da3_root_join(state, &state->path.blk[0]);
1063 xfs_da_blkinfo_onlychild_validate(struct xfs_da_blkinfo *blkinfo, __u16 level)
1065 __be16 magic = blkinfo->magic;
1068 ASSERT(magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC) ||
1069 magic == cpu_to_be16(XFS_DIR3_LEAFN_MAGIC) ||
1070 magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC) ||
1071 magic == cpu_to_be16(XFS_ATTR3_LEAF_MAGIC));
1073 ASSERT(magic == cpu_to_be16(XFS_DA_NODE_MAGIC) ||
1074 magic == cpu_to_be16(XFS_DA3_NODE_MAGIC));
1076 ASSERT(!blkinfo->forw);
1077 ASSERT(!blkinfo->back);
1080 #define xfs_da_blkinfo_onlychild_validate(blkinfo, level)
1084 * We have only one entry in the root. Copy the only remaining child of
1085 * the old root to block 0 as the new root node.
1089 struct xfs_da_state *state,
1090 struct xfs_da_state_blk *root_blk)
1092 struct xfs_da_intnode *oldroot;
1093 struct xfs_da_args *args;
1096 struct xfs_da3_icnode_hdr oldroothdr;
1097 struct xfs_da_node_entry *btree;
1100 trace_xfs_da_root_join(state->args);
1102 ASSERT(root_blk->magic == XFS_DA_NODE_MAGIC);
1105 oldroot = root_blk->bp->b_addr;
1106 xfs_da3_node_hdr_from_disk(&oldroothdr, oldroot);
1107 ASSERT(oldroothdr.forw == 0);
1108 ASSERT(oldroothdr.back == 0);
1111 * If the root has more than one child, then don't do anything.
1113 if (oldroothdr.count > 1)
1117 * Read in the (only) child block, then copy those bytes into
1118 * the root block's buffer and free the original child block.
1120 btree = xfs_da3_node_tree_p(oldroot);
1121 child = be32_to_cpu(btree[0].before);
1123 error = xfs_da3_node_read(args->trans, args->dp, child, -1, &bp,
1127 xfs_da_blkinfo_onlychild_validate(bp->b_addr, oldroothdr.level);
1130 * This could be copying a leaf back into the root block in the case of
1131 * there only being a single leaf block left in the tree. Hence we have
1132 * to update the b_ops pointer as well to match the buffer type change
1133 * that could occur. For dir3 blocks we also need to update the block
1134 * number in the buffer header.
1136 memcpy(root_blk->bp->b_addr, bp->b_addr, state->blocksize);
1137 root_blk->bp->b_ops = bp->b_ops;
1138 xfs_trans_buf_copy_type(root_blk->bp, bp);
1139 if (oldroothdr.magic == XFS_DA3_NODE_MAGIC) {
1140 struct xfs_da3_blkinfo *da3 = root_blk->bp->b_addr;
1141 da3->blkno = cpu_to_be64(root_blk->bp->b_bn);
1143 xfs_trans_log_buf(args->trans, root_blk->bp, 0, state->blocksize - 1);
1144 error = xfs_da_shrink_inode(args, child, bp);
1149 * Check a node block and its neighbors to see if the block should be
1150 * collapsed into one or the other neighbor. Always keep the block
1151 * with the smaller block number.
1152 * If the current block is over 50% full, don't try to join it, return 0.
1153 * If the block is empty, fill in the state structure and return 2.
1154 * If it can be collapsed, fill in the state structure and return 1.
1155 * If nothing can be done, return 0.
1158 xfs_da3_node_toosmall(
1159 struct xfs_da_state *state,
1162 struct xfs_da_intnode *node;
1163 struct xfs_da_state_blk *blk;
1164 struct xfs_da_blkinfo *info;
1167 struct xfs_da3_icnode_hdr nodehdr;
1174 trace_xfs_da_node_toosmall(state->args);
1177 * Check for the degenerate case of the block being over 50% full.
1178 * If so, it's not worth even looking to see if we might be able
1179 * to coalesce with a sibling.
1181 blk = &state->path.blk[ state->path.active-1 ];
1182 info = blk->bp->b_addr;
1183 node = (xfs_da_intnode_t *)info;
1184 xfs_da3_node_hdr_from_disk(&nodehdr, node);
1185 if (nodehdr.count > (state->node_ents >> 1)) {
1186 *action = 0; /* blk over 50%, don't try to join */
1187 return(0); /* blk over 50%, don't try to join */
1191 * Check for the degenerate case of the block being empty.
1192 * If the block is empty, we'll simply delete it, no need to
1193 * coalesce it with a sibling block. We choose (arbitrarily)
1194 * to merge with the forward block unless it is NULL.
1196 if (nodehdr.count == 0) {
1198 * Make altpath point to the block we want to keep and
1199 * path point to the block we want to drop (this one).
1201 forward = (info->forw != 0);
1202 memcpy(&state->altpath, &state->path, sizeof(state->path));
1203 error = xfs_da3_path_shift(state, &state->altpath, forward,
1216 * Examine each sibling block to see if we can coalesce with
1217 * at least 25% free space to spare. We need to figure out
1218 * whether to merge with the forward or the backward block.
1219 * We prefer coalescing with the lower numbered sibling so as
1220 * to shrink a directory over time.
1222 count = state->node_ents;
1223 count -= state->node_ents >> 2;
1224 count -= nodehdr.count;
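/*
 * Sketch (not kernel code): the "fits with at least 25% to spare" test that
 * the sibling scan below applies.  A neighbour can absorb this node only if
 * the combined entry count still leaves a quarter of the node's capacity
 * free; 'capacity' stands in for state->node_ents.
 */
static int can_coalesce_model(int capacity, int our_count, int sib_count)
{
	int budget = capacity - capacity / 4;	/* 75% of the node */

	return our_count + sib_count <= budget;
}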
1226 /* start with smaller blk num */
1227 forward = nodehdr.forw < nodehdr.back;
1228 for (i = 0; i < 2; forward = !forward, i++) {
1229 struct xfs_da3_icnode_hdr thdr;
1231 blkno = nodehdr.forw;
1233 blkno = nodehdr.back;
1236 error = xfs_da3_node_read(state->args->trans, state->args->dp,
1237 blkno, -1, &bp, state->args->whichfork);
1242 xfs_da3_node_hdr_from_disk(&thdr, node);
1243 xfs_trans_brelse(state->args->trans, bp);
1245 if (count - thdr.count >= 0)
1246 break; /* fits with at least 25% to spare */
1254 * Make altpath point to the block we want to keep (the lower
1255 * numbered block) and path point to the block we want to drop.
1257 memcpy(&state->altpath, &state->path, sizeof(state->path));
1258 if (blkno < blk->blkno) {
1259 error = xfs_da3_path_shift(state, &state->altpath, forward,
1262 error = xfs_da3_path_shift(state, &state->path, forward,
1276 * Pick up the last hashvalue from an intermediate node.
1279 xfs_da3_node_lasthash(
1283 struct xfs_da_intnode *node;
1284 struct xfs_da_node_entry *btree;
1285 struct xfs_da3_icnode_hdr nodehdr;
1288 xfs_da3_node_hdr_from_disk(&nodehdr, node);
1290 *count = nodehdr.count;
1293 btree = xfs_da3_node_tree_p(node);
1294 return be32_to_cpu(btree[nodehdr.count - 1].hashval);
1298 * Walk back up the tree adjusting hash values as necessary;
1299 * when we stop making changes, return.
1302 xfs_da3_fixhashpath(
1303 struct xfs_da_state *state,
1304 struct xfs_da_state_path *path)
1306 struct xfs_da_state_blk *blk;
1307 struct xfs_da_intnode *node;
1308 struct xfs_da_node_entry *btree;
1309 xfs_dahash_t lasthash=0;
1313 trace_xfs_da_fixhashpath(state->args);
1315 level = path->active-1;
1316 blk = &path->blk[ level ];
1317 switch (blk->magic) {
1318 case XFS_ATTR_LEAF_MAGIC:
1319 lasthash = xfs_attr_leaf_lasthash(blk->bp, &count);
1323 case XFS_DIR2_LEAFN_MAGIC:
1324 lasthash = xfs_dir2_leafn_lasthash(blk->bp, &count);
1328 case XFS_DA_NODE_MAGIC:
1329 lasthash = xfs_da3_node_lasthash(blk->bp, &count);
1334 for (blk--, level--; level >= 0; blk--, level--) {
1335 struct xfs_da3_icnode_hdr nodehdr;
1337 node = blk->bp->b_addr;
1338 xfs_da3_node_hdr_from_disk(&nodehdr, node);
1339 btree = xfs_da3_node_tree_p(node);
1340 if (be32_to_cpu(btree[blk->index].hashval) == lasthash)
1342 blk->hashval = lasthash;
1343 btree[blk->index].hashval = cpu_to_be32(lasthash);
1344 xfs_trans_log_buf(state->args->trans, blk->bp,
1345 XFS_DA_LOGRANGE(node, &btree[blk->index],
1348 lasthash = be32_to_cpu(btree[nodehdr.count - 1].hashval);
1353 * Remove an entry from an intermediate node.
1356 xfs_da3_node_remove(
1357 struct xfs_da_state *state,
1358 struct xfs_da_state_blk *drop_blk)
1360 struct xfs_da_intnode *node;
1361 struct xfs_da3_icnode_hdr nodehdr;
1362 struct xfs_da_node_entry *btree;
1366 trace_xfs_da_node_remove(state->args);
1368 node = drop_blk->bp->b_addr;
1369 xfs_da3_node_hdr_from_disk(&nodehdr, node);
1370 ASSERT(drop_blk->index < nodehdr.count);
1371 ASSERT(drop_blk->index >= 0);
1374 * Copy over the offending entry, or just zero it out.
1376 index = drop_blk->index;
1377 btree = xfs_da3_node_tree_p(node);
1378 if (index < nodehdr.count - 1) {
1379 tmp = nodehdr.count - index - 1;
1380 tmp *= (uint)sizeof(xfs_da_node_entry_t);
1381 memmove(&btree[index], &btree[index + 1], tmp);
1382 xfs_trans_log_buf(state->args->trans, drop_blk->bp,
1383 XFS_DA_LOGRANGE(node, &btree[index], tmp));
1384 index = nodehdr.count - 1;
1386 memset(&btree[index], 0, sizeof(xfs_da_node_entry_t));
1387 xfs_trans_log_buf(state->args->trans, drop_blk->bp,
1388 XFS_DA_LOGRANGE(node, &btree[index], sizeof(btree[index])));
1390 xfs_da3_node_hdr_to_disk(node, &nodehdr);
1391 xfs_trans_log_buf(state->args->trans, drop_blk->bp,
1392 XFS_DA_LOGRANGE(node, &node->hdr, xfs_da3_node_hdr_size(node)));
1395 * Copy the last hash value from the block to propagate upwards.
1397 drop_blk->hashval = be32_to_cpu(btree[index - 1].hashval);
1401 * Unbalance the elements between two intermediate nodes by moving all
1402 * Btree elements from one node into the other.
1405 xfs_da3_node_unbalance(
1406 struct xfs_da_state *state,
1407 struct xfs_da_state_blk *drop_blk,
1408 struct xfs_da_state_blk *save_blk)
1410 struct xfs_da_intnode *drop_node;
1411 struct xfs_da_intnode *save_node;
1412 struct xfs_da_node_entry *drop_btree;
1413 struct xfs_da_node_entry *save_btree;
1414 struct xfs_da3_icnode_hdr drop_hdr;
1415 struct xfs_da3_icnode_hdr save_hdr;
1416 struct xfs_trans *tp;
1420 trace_xfs_da_node_unbalance(state->args);
1422 drop_node = drop_blk->bp->b_addr;
1423 save_node = save_blk->bp->b_addr;
1424 xfs_da3_node_hdr_from_disk(&drop_hdr, drop_node);
1425 xfs_da3_node_hdr_from_disk(&save_hdr, save_node);
1426 drop_btree = xfs_da3_node_tree_p(drop_node);
1427 save_btree = xfs_da3_node_tree_p(save_node);
1428 tp = state->args->trans;
1431 * If the dying block has lower hashvals, then move all the
1432 * elements in the remaining block up to make a hole.
1434 if ((be32_to_cpu(drop_btree[0].hashval) <
1435 be32_to_cpu(save_btree[0].hashval)) ||
1436 (be32_to_cpu(drop_btree[drop_hdr.count - 1].hashval) <
1437 be32_to_cpu(save_btree[save_hdr.count - 1].hashval))) {
1438 /* XXX: check this - is memmove dst correct? */
1439 tmp = save_hdr.count * sizeof(xfs_da_node_entry_t);
1440 memmove(&save_btree[drop_hdr.count], &save_btree[0], tmp);
1443 xfs_trans_log_buf(tp, save_blk->bp,
1444 XFS_DA_LOGRANGE(save_node, &save_btree[0],
1445 (save_hdr.count + drop_hdr.count) *
1446 sizeof(xfs_da_node_entry_t)));
1448 sindex = save_hdr.count;
1449 xfs_trans_log_buf(tp, save_blk->bp,
1450 XFS_DA_LOGRANGE(save_node, &save_btree[sindex],
1451 drop_hdr.count * sizeof(xfs_da_node_entry_t)));
1455 * Move all the B-tree elements from drop_blk to save_blk.
1457 tmp = drop_hdr.count * (uint)sizeof(xfs_da_node_entry_t);
1458 memcpy(&save_btree[sindex], &drop_btree[0], tmp);
1459 save_hdr.count += drop_hdr.count;
1461 xfs_da3_node_hdr_to_disk(save_node, &save_hdr);
1462 xfs_trans_log_buf(tp, save_blk->bp,
1463 XFS_DA_LOGRANGE(save_node, &save_node->hdr,
1464 xfs_da3_node_hdr_size(save_node)));
1467 * Save the last hashval in the remaining block for upward propagation.
1469 save_blk->hashval = be32_to_cpu(save_btree[save_hdr.count - 1].hashval);
1472 /*========================================================================
1473 * Routines used for finding things in the Btree.
1474 *========================================================================*/
1477 * Walk down the Btree looking for a particular filename, filling
1478 * in the state structure as we go.
1480 * We will set the state structure to point to each of the elements
1481 * in each of the nodes where either the hashval is or should be.
1483 * We support duplicate hashval's so for each entry in the current
1484 * node that could contain the desired hashval, descend. This is a
1485 * pruned depth-first tree search.
1488 xfs_da3_node_lookup_int(
1489 struct xfs_da_state *state,
1492 struct xfs_da_state_blk *blk;
1493 struct xfs_da_blkinfo *curr;
1494 struct xfs_da_intnode *node;
1495 struct xfs_da_node_entry *btree;
1496 struct xfs_da3_icnode_hdr nodehdr;
1497 struct xfs_da_args *args;
1499 xfs_dahash_t hashval;
1500 xfs_dahash_t btreehashval;
1510 * Descend thru the B-tree searching each level for the right
1511 * node to use, until the right hashval is found.
1513 blkno = (args->whichfork == XFS_DATA_FORK)? state->mp->m_dirleafblk : 0;
1514 for (blk = &state->path.blk[0], state->path.active = 1;
1515 state->path.active <= XFS_DA_NODE_MAXDEPTH;
1516 blk++, state->path.active++) {
1518 * Read the next node down in the tree.
1521 error = xfs_da3_node_read(args->trans, args->dp, blkno,
1522 -1, &blk->bp, args->whichfork);
1525 state->path.active--;
1528 curr = blk->bp->b_addr;
1529 blk->magic = be16_to_cpu(curr->magic);
1531 if (blk->magic == XFS_ATTR_LEAF_MAGIC ||
1532 blk->magic == XFS_ATTR3_LEAF_MAGIC) {
1533 blk->magic = XFS_ATTR_LEAF_MAGIC;
1534 blk->hashval = xfs_attr_leaf_lasthash(blk->bp, NULL);
1538 if (blk->magic == XFS_DIR2_LEAFN_MAGIC ||
1539 blk->magic == XFS_DIR3_LEAFN_MAGIC) {
1540 blk->magic = XFS_DIR2_LEAFN_MAGIC;
1541 blk->hashval = xfs_dir2_leafn_lasthash(blk->bp, NULL);
1545 blk->magic = XFS_DA_NODE_MAGIC;
1549 * Search an intermediate node for a match.
1551 node = blk->bp->b_addr;
1552 xfs_da3_node_hdr_from_disk(&nodehdr, node);
1553 btree = xfs_da3_node_tree_p(node);
1555 max = nodehdr.count;
1556 blk->hashval = be32_to_cpu(btree[max - 1].hashval);
1559 * Binary search. (note: small blocks will skip loop)
1561 probe = span = max / 2;
1562 hashval = args->hashval;
1565 btreehashval = be32_to_cpu(btree[probe].hashval);
1566 if (btreehashval < hashval)
1568 else if (btreehashval > hashval)
1573 ASSERT((probe >= 0) && (probe < max));
1574 ASSERT((span <= 4) ||
1575 (be32_to_cpu(btree[probe].hashval) == hashval));
1578 * Since we may have duplicate hashval's, find the first
1579 * matching hashval in the node.
1582 be32_to_cpu(btree[probe].hashval) >= hashval) {
1585 while (probe < max &&
1586 be32_to_cpu(btree[probe].hashval) < hashval) {
1591 * Pick the right block to descend on.
1594 blk->index = max - 1;
1595 blkno = be32_to_cpu(btree[max - 1].before);
1598 blkno = be32_to_cpu(btree[probe].before);
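/*
 * Sketch (not kernel code): the probe/span binary search used above plus the
 * walk back to the first entry carrying the wanted hashval.  'entries',
 * 'count' and 'want' stand in for btree[], nodehdr.count and args->hashval;
 * at least one entry is assumed.
 */
#include <stdint.h>

static int da_node_probe_model(const uint32_t *entries, int count,
			       uint32_t want)
{
	int probe, span;

	probe = span = count / 2;
	while (span > 4) {
		span /= 2;
		if (entries[probe] < want)
			probe += span;
		else if (entries[probe] > want)
			probe -= span;
		else
			break;
	}

	/* hashvals may repeat: back up to the first entry that is >= want */
	while (probe > 0 && entries[probe] >= want)
		probe--;
	while (probe < count && entries[probe] < want)
		probe++;

	/* everything hashes lower than want: descend on the last entry */
	return (probe == count) ? count - 1 : probe;
}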
1603 * A leaf block that ends in the hashval that we are interested in
1604 * (final hashval == search hashval) means that the next block may
1605 * contain more entries with the same hashval; shift upward to the
1606 * next leaf and keep searching.
1609 if (blk->magic == XFS_DIR2_LEAFN_MAGIC) {
1610 retval = xfs_dir2_leafn_lookup_int(blk->bp, args,
1611 &blk->index, state);
1612 } else if (blk->magic == XFS_ATTR_LEAF_MAGIC) {
1613 retval = xfs_attr3_leaf_lookup_int(blk->bp, args);
1614 blk->index = args->index;
1615 args->blkno = blk->blkno;
1618 return XFS_ERROR(EFSCORRUPTED);
1620 if (((retval == ENOENT) || (retval == ENOATTR)) &&
1621 (blk->hashval == args->hashval)) {
1622 error = xfs_da3_path_shift(state, &state->path, 1, 1,
1628 } else if (blk->magic == XFS_ATTR_LEAF_MAGIC) {
1629 /* path_shift() gives ENOENT */
1630 retval = XFS_ERROR(ENOATTR);
1639 /*========================================================================
1640  * Utility routines.
1641  *========================================================================*/
1644 * Compare two intermediate nodes for "order".
1648 struct xfs_buf *node1_bp,
1649 struct xfs_buf *node2_bp)
1651 struct xfs_da_intnode *node1;
1652 struct xfs_da_intnode *node2;
1653 struct xfs_da_node_entry *btree1;
1654 struct xfs_da_node_entry *btree2;
1655 struct xfs_da3_icnode_hdr node1hdr;
1656 struct xfs_da3_icnode_hdr node2hdr;
1658 node1 = node1_bp->b_addr;
1659 node2 = node2_bp->b_addr;
1660 xfs_da3_node_hdr_from_disk(&node1hdr, node1);
1661 xfs_da3_node_hdr_from_disk(&node2hdr, node2);
1662 btree1 = xfs_da3_node_tree_p(node1);
1663 btree2 = xfs_da3_node_tree_p(node2);
1665 if (node1hdr.count > 0 && node2hdr.count > 0 &&
1666 ((be32_to_cpu(btree2[0].hashval) < be32_to_cpu(btree1[0].hashval)) ||
1667 (be32_to_cpu(btree2[node2hdr.count - 1].hashval) <
1668 be32_to_cpu(btree1[node1hdr.count - 1].hashval)))) {
1675 * Link a new block into a doubly linked list of blocks (of whatever type).
1679 struct xfs_da_state *state,
1680 struct xfs_da_state_blk *old_blk,
1681 struct xfs_da_state_blk *new_blk)
1683 struct xfs_da_blkinfo *old_info;
1684 struct xfs_da_blkinfo *new_info;
1685 struct xfs_da_blkinfo *tmp_info;
1686 struct xfs_da_args *args;
1692 * Set up environment.
1695 ASSERT(args != NULL);
1696 old_info = old_blk->bp->b_addr;
1697 new_info = new_blk->bp->b_addr;
1698 ASSERT(old_blk->magic == XFS_DA_NODE_MAGIC ||
1699 old_blk->magic == XFS_DIR2_LEAFN_MAGIC ||
1700 old_blk->magic == XFS_ATTR_LEAF_MAGIC);
1702 switch (old_blk->magic) {
1703 case XFS_ATTR_LEAF_MAGIC:
1704 before = xfs_attr_leaf_order(old_blk->bp, new_blk->bp);
1706 case XFS_DIR2_LEAFN_MAGIC:
1707 before = xfs_dir2_leafn_order(old_blk->bp, new_blk->bp);
1709 case XFS_DA_NODE_MAGIC:
1710 before = xfs_da3_node_order(old_blk->bp, new_blk->bp);
1715 * Link blocks in appropriate order.
1719 * Link new block in before existing block.
1721 trace_xfs_da_link_before(args);
1722 new_info->forw = cpu_to_be32(old_blk->blkno);
1723 new_info->back = old_info->back;
1724 if (old_info->back) {
1725 error = xfs_da3_node_read(args->trans, args->dp,
1726 be32_to_cpu(old_info->back),
1727 -1, &bp, args->whichfork);
1731 tmp_info = bp->b_addr;
1732 ASSERT(tmp_info->magic == old_info->magic);
1733 ASSERT(be32_to_cpu(tmp_info->forw) == old_blk->blkno);
1734 tmp_info->forw = cpu_to_be32(new_blk->blkno);
1735 xfs_trans_log_buf(args->trans, bp, 0, sizeof(*tmp_info)-1);
1737 old_info->back = cpu_to_be32(new_blk->blkno);
1740 * Link new block in after existing block.
1742 trace_xfs_da_link_after(args);
1743 new_info->forw = old_info->forw;
1744 new_info->back = cpu_to_be32(old_blk->blkno);
1745 if (old_info->forw) {
1746 error = xfs_da3_node_read(args->trans, args->dp,
1747 be32_to_cpu(old_info->forw),
1748 -1, &bp, args->whichfork);
1752 tmp_info = bp->b_addr;
1753 ASSERT(tmp_info->magic == old_info->magic);
1754 ASSERT(be32_to_cpu(tmp_info->back) == old_blk->blkno);
1755 tmp_info->back = cpu_to_be32(new_blk->blkno);
1756 xfs_trans_log_buf(args->trans, bp, 0, sizeof(*tmp_info)-1);
1758 old_info->forw = cpu_to_be32(new_blk->blkno);
1761 xfs_trans_log_buf(args->trans, old_blk->bp, 0, sizeof(*tmp_info) - 1);
1762 xfs_trans_log_buf(args->trans, new_blk->bp, 0, sizeof(*tmp_info) - 1);
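/*
 * Sketch (not kernel code): the sibling-chain splice performed above, on
 * in-memory stand-ins instead of buffers.  Blocks are chained by block
 * number with 0 meaning "no sibling"; whether the new block goes before or
 * after its peer is decided by comparing the hashval ranges of the two
 * blocks, as xfs_da3_node_order() and friends do.
 */
struct chain_blk_model {
	unsigned int blkno;
	unsigned int forw;	/* sibling with higher hashvals, 0 if none */
	unsigned int back;	/* sibling with lower hashvals, 0 if none */
};

/* peer_back/peer_forw are oldblk's current neighbours, NULL when absent */
static void blk_link_model(struct chain_blk_model *oldblk,
			   struct chain_blk_model *newblk,
			   struct chain_blk_model *peer_back,
			   struct chain_blk_model *peer_forw,
			   int before)
{
	if (before) {			/* new block sorts below the old one */
		newblk->forw = oldblk->blkno;
		newblk->back = oldblk->back;
		if (peer_back)
			peer_back->forw = newblk->blkno;
		oldblk->back = newblk->blkno;
	} else {			/* new block sorts above the old one */
		newblk->forw = oldblk->forw;
		newblk->back = oldblk->blkno;
		if (peer_forw)
			peer_forw->back = newblk->blkno;
		oldblk->forw = newblk->blkno;
	}
}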
1767 * Unlink a block from a doubly linked list of blocks.
1769 STATIC int /* error */
1771 struct xfs_da_state *state,
1772 struct xfs_da_state_blk *drop_blk,
1773 struct xfs_da_state_blk *save_blk)
1775 struct xfs_da_blkinfo *drop_info;
1776 struct xfs_da_blkinfo *save_info;
1777 struct xfs_da_blkinfo *tmp_info;
1778 struct xfs_da_args *args;
1783 * Set up environment.
1786 ASSERT(args != NULL);
1787 save_info = save_blk->bp->b_addr;
1788 drop_info = drop_blk->bp->b_addr;
1789 ASSERT(save_blk->magic == XFS_DA_NODE_MAGIC ||
1790 save_blk->magic == XFS_DIR2_LEAFN_MAGIC ||
1791 save_blk->magic == XFS_ATTR_LEAF_MAGIC);
1792 ASSERT(save_blk->magic == drop_blk->magic);
1793 ASSERT((be32_to_cpu(save_info->forw) == drop_blk->blkno) ||
1794 (be32_to_cpu(save_info->back) == drop_blk->blkno));
1795 ASSERT((be32_to_cpu(drop_info->forw) == save_blk->blkno) ||
1796 (be32_to_cpu(drop_info->back) == save_blk->blkno));
1799 * Unlink the leaf block from the doubly linked chain of leaves.
1801 if (be32_to_cpu(save_info->back) == drop_blk->blkno) {
1802 trace_xfs_da_unlink_back(args);
1803 save_info->back = drop_info->back;
1804 if (drop_info->back) {
1805 error = xfs_da3_node_read(args->trans, args->dp,
1806 be32_to_cpu(drop_info->back),
1807 -1, &bp, args->whichfork);
1811 tmp_info = bp->b_addr;
1812 ASSERT(tmp_info->magic == save_info->magic);
1813 ASSERT(be32_to_cpu(tmp_info->forw) == drop_blk->blkno);
1814 tmp_info->forw = cpu_to_be32(save_blk->blkno);
1815 xfs_trans_log_buf(args->trans, bp, 0,
1816 sizeof(*tmp_info) - 1);
1819 trace_xfs_da_unlink_forward(args);
1820 save_info->forw = drop_info->forw;
1821 if (drop_info->forw) {
1822 error = xfs_da3_node_read(args->trans, args->dp,
1823 be32_to_cpu(drop_info->forw),
1824 -1, &bp, args->whichfork);
1828 tmp_info = bp->b_addr;
1829 ASSERT(tmp_info->magic == save_info->magic);
1830 ASSERT(be32_to_cpu(tmp_info->back) == drop_blk->blkno);
1831 tmp_info->back = cpu_to_be32(save_blk->blkno);
1832 xfs_trans_log_buf(args->trans, bp, 0,
1833 sizeof(*tmp_info) - 1);
1837 xfs_trans_log_buf(args->trans, save_blk->bp, 0, sizeof(*save_info) - 1);
1842 * Move a path "forward" or "!forward" one block at the current level.
1844 * This routine will adjust a "path" to point to the next block
1845 * "forward" (higher hashvalues) or "!forward" (lower hashvals) in the
1846 * Btree, including updating pointers to the intermediate nodes between
1847 * the new bottom and the root.
1851 struct xfs_da_state *state,
1852 struct xfs_da_state_path *path,
1857 struct xfs_da_state_blk *blk;
1858 struct xfs_da_blkinfo *info;
1859 struct xfs_da_intnode *node;
1860 struct xfs_da_args *args;
1861 struct xfs_da_node_entry *btree;
1862 struct xfs_da3_icnode_hdr nodehdr;
1863 xfs_dablk_t blkno = 0;
1867 trace_xfs_da_path_shift(state->args);
1870 * Roll up the Btree looking for the first block where our
1871 * current index is not at the edge of the block. Note that
1872 * we skip the bottom layer because we want the sibling block.
1875 ASSERT(args != NULL);
1876 ASSERT(path != NULL);
1877 ASSERT((path->active > 0) && (path->active < XFS_DA_NODE_MAXDEPTH));
1878 level = (path->active-1) - 1; /* skip bottom layer in path */
1879 for (blk = &path->blk[level]; level >= 0; blk--, level--) {
1880 node = blk->bp->b_addr;
1881 xfs_da3_node_hdr_from_disk(&nodehdr, node);
1882 btree = xfs_da3_node_tree_p(node);
1884 if (forward && (blk->index < nodehdr.count - 1)) {
1886 blkno = be32_to_cpu(btree[blk->index].before);
1888 } else if (!forward && (blk->index > 0)) {
1890 blkno = be32_to_cpu(btree[blk->index].before);
1895 *result = XFS_ERROR(ENOENT); /* we're out of our tree */
1896 ASSERT(args->op_flags & XFS_DA_OP_OKNOENT);
1901 * Roll down the edge of the subtree until we reach the
1902 * same depth we were at originally.
1904 for (blk++, level++; level < path->active; blk++, level++) {
1906 * Release the old block.
1907 * (if it's dirty, trans won't actually let go)
1910 xfs_trans_brelse(args->trans, blk->bp);
1913 * Read the next child block.
1916 error = xfs_da3_node_read(args->trans, args->dp, blkno, -1,
1917 &blk->bp, args->whichfork);
1920 info = blk->bp->b_addr;
1921 ASSERT(info->magic == cpu_to_be16(XFS_DA_NODE_MAGIC) ||
1922 info->magic == cpu_to_be16(XFS_DA3_NODE_MAGIC) ||
1923 info->magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC) ||
1924 info->magic == cpu_to_be16(XFS_DIR3_LEAFN_MAGIC) ||
1925 info->magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC) ||
1926 info->magic == cpu_to_be16(XFS_ATTR3_LEAF_MAGIC));
1930 * Note: we flatten the magic number to a single type so we
1931 * don't have to compare against crc/non-crc types elsewhere.
1933 switch (be16_to_cpu(info->magic)) {
1934 case XFS_DA_NODE_MAGIC:
1935 case XFS_DA3_NODE_MAGIC:
1936 blk->magic = XFS_DA_NODE_MAGIC;
1937 node = (xfs_da_intnode_t *)info;
1938 xfs_da3_node_hdr_from_disk(&nodehdr, node);
1939 btree = xfs_da3_node_tree_p(node);
1940 blk->hashval = be32_to_cpu(btree[nodehdr.count - 1].hashval);
1944 blk->index = nodehdr.count - 1;
1945 blkno = be32_to_cpu(btree[blk->index].before);
1947 case XFS_ATTR_LEAF_MAGIC:
1948 case XFS_ATTR3_LEAF_MAGIC:
1949 blk->magic = XFS_ATTR_LEAF_MAGIC;
1950 ASSERT(level == path->active-1);
1952 blk->hashval = xfs_attr_leaf_lasthash(blk->bp,
1955 case XFS_DIR2_LEAFN_MAGIC:
1956 case XFS_DIR3_LEAFN_MAGIC:
1957 blk->magic = XFS_DIR2_LEAFN_MAGIC;
1958 ASSERT(level == path->active-1);
1960 blk->hashval = xfs_dir2_leafn_lasthash(blk->bp,
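/*
 * Sketch (not kernel code): the two phases of the path shift above on a
 * simplified path.  'index[l]' is which child the path currently uses at
 * level l and 'counts[l]' how many entries that node holds; the bottom of
 * the path is level 'levels - 1'.  Walk up until an index can move
 * sideways, then walk back down hugging the nearest edge of the new
 * subtree.
 */
static int path_shift_model(int *index, const int *counts, int levels,
			    int forward)
{
	int level;

	/* phase 1: lowest ancestor (above the bottom) that can move sideways */
	for (level = levels - 2; level >= 0; level--) {
		if (forward && index[level] < counts[level] - 1) {
			index[level]++;
			break;
		} else if (!forward && index[level] > 0) {
			index[level]--;
			break;
		}
	}
	if (level < 0)
		return -1;	/* already at the edge of the tree */

	/* phase 2: descend again, taking the nearest edge at each level */
	for (level++; level < levels; level++)
		index[level] = forward ? 0 : counts[level] - 1;
	return 0;
}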
1973 /*========================================================================
1974  * Utility routines.
1975  *========================================================================*/
1978 * Implement a simple hash on a character string.
1979 * Rotate the hash value by 7 bits, then XOR each character in.
1980 * This is implemented with some source-level loop unrolling.
1983 xfs_da_hashname(const __uint8_t *name, int namelen)
1988 * Do four characters at a time as long as we can.
1990 for (hash = 0; namelen >= 4; namelen -= 4, name += 4)
1991 hash = (name[0] << 21) ^ (name[1] << 14) ^ (name[2] << 7) ^
1992 (name[3] << 0) ^ rol32(hash, 7 * 4);
1995 * Now do the rest of the characters.
1999 return (name[0] << 14) ^ (name[1] << 7) ^ (name[2] << 0) ^
2002 return (name[0] << 7) ^ (name[1] << 0) ^ rol32(hash, 7 * 2);
2004 return (name[0] << 0) ^ rol32(hash, 7 * 1);
2005 default: /* case 0: */
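/*
 * Sketch (not kernel code): a standalone userspace model of the name hash
 * above, assuming rol32() is the usual 32-bit rotate-left.  Handy for
 * predicting where a given name should land when poking at a directory or
 * attribute tree from outside the kernel.
 */
#include <stdint.h>

static inline uint32_t rol32_model(uint32_t word, unsigned int shift)
{
	return (word << shift) | (word >> (32 - shift));
}

static uint32_t da_hashname_model(const uint8_t *name, int namelen)
{
	uint32_t hash;

	/* four characters at a time, rotating the running hash by 28 bits */
	for (hash = 0; namelen >= 4; namelen -= 4, name += 4)
		hash = (name[0] << 21) ^ (name[1] << 14) ^ (name[2] << 7) ^
		       (name[3] << 0) ^ rol32_model(hash, 7 * 4);

	/* then the 0-3 leftover characters */
	switch (namelen) {
	case 3:
		return (name[0] << 14) ^ (name[1] << 7) ^ (name[2] << 0) ^
		       rol32_model(hash, 7 * 3);
	case 2:
		return (name[0] << 7) ^ (name[1] << 0) ^
		       rol32_model(hash, 7 * 2);
	case 1:
		return (name[0] << 0) ^ rol32_model(hash, 7 * 1);
	default: /* case 0: */
		return hash;
	}
}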
2012 struct xfs_da_args *args,
2013 const unsigned char *name,
2016 return (args->namelen == len && memcmp(args->name, name, len) == 0) ?
2017 XFS_CMP_EXACT : XFS_CMP_DIFFERENT;
2021 xfs_default_hashname(
2022 struct xfs_name *name)
2024 return xfs_da_hashname(name->name, name->len);
2027 const struct xfs_nameops xfs_default_nameops = {
2028 .hashname = xfs_default_hashname,
2029 .compname = xfs_da_compname
2033 xfs_da_grow_inode_int(
2034 struct xfs_da_args *args,
2038 struct xfs_trans *tp = args->trans;
2039 struct xfs_inode *dp = args->dp;
2040 int w = args->whichfork;
2041 xfs_drfsbno_t nblks = dp->i_d.di_nblocks;
2042 struct xfs_bmbt_irec map, *mapp;
2043 int nmap, error, got, i, mapi;
2046 * Find a spot in the file space to put the new block.
2048 error = xfs_bmap_first_unused(tp, dp, count, bno, w);
2053 * Try mapping it in one filesystem block.
2056 ASSERT(args->firstblock != NULL);
2057 error = xfs_bmapi_write(tp, dp, *bno, count,
2058 xfs_bmapi_aflag(w)|XFS_BMAPI_METADATA|XFS_BMAPI_CONTIG,
2059 args->firstblock, args->total, &map, &nmap,
2068 } else if (nmap == 0 && count > 1) {
2073 * If we didn't get it and the block might work if fragmented,
2074 * try without the CONTIG flag. Loop until we get it all.
2076 mapp = kmem_alloc(sizeof(*mapp) * count, KM_SLEEP);
2077 for (b = *bno, mapi = 0; b < *bno + count; ) {
2078 nmap = MIN(XFS_BMAP_MAX_NMAP, count);
2079 c = (int)(*bno + count - b);
2080 error = xfs_bmapi_write(tp, dp, b, c,
2081 xfs_bmapi_aflag(w)|XFS_BMAPI_METADATA,
2082 args->firstblock, args->total,
2083 &mapp[mapi], &nmap, args->flist);
2089 b = mapp[mapi - 1].br_startoff +
2090 mapp[mapi - 1].br_blockcount;
2098 * Count the blocks we got, make sure it matches the total.
2100 for (i = 0, got = 0; i < mapi; i++)
2101 got += mapp[i].br_blockcount;
2102 if (got != count || mapp[0].br_startoff != *bno ||
2103 mapp[mapi - 1].br_startoff + mapp[mapi - 1].br_blockcount !=
2105 error = XFS_ERROR(ENOSPC);
2109 /* account for newly allocated blocks in reserved blocks total */
2110 args->total -= dp->i_d.di_nblocks - nblks;
2119 * Add a block to the btree ahead of the file.
2120 * Return the new block number to the caller.
2124 struct xfs_da_args *args,
2125 xfs_dablk_t *new_blkno)
2131 trace_xfs_da_grow_inode(args);
2133 if (args->whichfork == XFS_DATA_FORK) {
2134 bno = args->dp->i_mount->m_dirleafblk;
2135 count = args->dp->i_mount->m_dirblkfsbs;
2141 error = xfs_da_grow_inode_int(args, &bno, count);
2143 *new_blkno = (xfs_dablk_t)bno;
2148 * Ick. We need to always be able to remove a btree block, even
2149 * if there's no space reservation because the filesystem is full.
2150 * This is called if xfs_bunmapi on a btree block fails due to ENOSPC.
2151 * It swaps the target block with the last block in the file. The
2152 * last block in the file can always be removed, since removing it
2153 * cannot cause a bmap btree split.
2156 xfs_da3_swap_lastblock(
2157 struct xfs_da_args *args,
	xfs_dablk_t		*dead_blknop,
	struct xfs_buf		**dead_bufp)
{
	struct xfs_da_blkinfo	*dead_info;
	struct xfs_da_blkinfo	*sib_info;
	struct xfs_da_intnode	*par_node;
	struct xfs_da_intnode	*dead_node;
	struct xfs_dir2_leaf	*dead_leaf2;
	struct xfs_da_node_entry *btree;
	struct xfs_da3_icnode_hdr par_hdr;
	struct xfs_inode	*ip;
	struct xfs_trans	*tp;
	struct xfs_mount	*mp;
	struct xfs_buf		*dead_buf;
	struct xfs_buf		*last_buf;
	struct xfs_buf		*sib_buf;
	struct xfs_buf		*par_buf;
	xfs_dahash_t		dead_hash;
	xfs_fileoff_t		lastoff;
	xfs_dablk_t		dead_blkno;
	xfs_dablk_t		last_blkno;
	xfs_dablk_t		sib_blkno;
	xfs_dablk_t		par_blkno;
	int			error;
	int			w;
	int			entno;
	int			level;
	int			dead_level;

	trace_xfs_da_swap_lastblock(args);

	dead_buf = *dead_bufp;
	dead_blkno = *dead_blknop;
	tp = args->trans;
	ip = args->dp;
	w = args->whichfork;
	ASSERT(w == XFS_DATA_FORK);
	mp = ip->i_mount;
	lastoff = mp->m_dirfreeblk;
	error = xfs_bmap_last_before(tp, ip, &lastoff, w);
	if (error)
		return error;
	if (unlikely(lastoff == 0)) {
		XFS_ERROR_REPORT("xfs_da_swap_lastblock(1)", XFS_ERRLEVEL_LOW,
				 mp);
		return XFS_ERROR(EFSCORRUPTED);
	}
	/*
	 * Read the last block in the btree space.
	 */
	last_blkno = (xfs_dablk_t)lastoff - mp->m_dirblkfsbs;
	error = xfs_da3_node_read(tp, ip, last_blkno, -1, &last_buf, w);
	if (error)
		return error;
	/*
	 * Copy the last block into the dead buffer and log it.
	 */
	memcpy(dead_buf->b_addr, last_buf->b_addr, mp->m_dirblksize);
	xfs_trans_log_buf(tp, dead_buf, 0, mp->m_dirblksize - 1);
	dead_info = dead_buf->b_addr;
	/*
	 * Get values from the moved block.
	 */
	if (dead_info->magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC) ||
	    dead_info->magic == cpu_to_be16(XFS_DIR3_LEAFN_MAGIC)) {
		struct xfs_dir3_icleaf_hdr leafhdr;
		struct xfs_dir2_leaf_entry *ents;

		dead_leaf2 = (xfs_dir2_leaf_t *)dead_info;
		xfs_dir3_leaf_hdr_from_disk(&leafhdr, dead_leaf2);
		ents = xfs_dir3_leaf_ents_p(dead_leaf2);
		dead_level = 0;
		dead_hash = be32_to_cpu(ents[leafhdr.count - 1].hashval);
	} else {
		struct xfs_da3_icnode_hdr deadhdr;

		dead_node = (xfs_da_intnode_t *)dead_info;
		xfs_da3_node_hdr_from_disk(&deadhdr, dead_node);
		btree = xfs_da3_node_tree_p(dead_node);
		dead_level = deadhdr.level;
		dead_hash = be32_to_cpu(btree[deadhdr.count - 1].hashval);
	}
	sib_buf = par_buf = NULL;
	/*
	 * If the moved block has a left sibling, fix up the pointers.
	 */
	if ((sib_blkno = be32_to_cpu(dead_info->back))) {
		error = xfs_da3_node_read(tp, ip, sib_blkno, -1, &sib_buf, w);
		if (error)
			goto done;
		sib_info = sib_buf->b_addr;
		if (unlikely(
		    be32_to_cpu(sib_info->forw) != last_blkno ||
		    sib_info->magic != dead_info->magic)) {
			XFS_ERROR_REPORT("xfs_da_swap_lastblock(2)",
					 XFS_ERRLEVEL_LOW, mp);
			error = XFS_ERROR(EFSCORRUPTED);
			goto done;
		}
		sib_info->forw = cpu_to_be32(dead_blkno);
		xfs_trans_log_buf(tp, sib_buf,
			XFS_DA_LOGRANGE(sib_info, &sib_info->forw,
					sizeof(sib_info->forw)));
		sib_buf = NULL;
	}
	/*
	 * If the moved block has a right sibling, fix up the pointers.
	 */
	if ((sib_blkno = be32_to_cpu(dead_info->forw))) {
		error = xfs_da3_node_read(tp, ip, sib_blkno, -1, &sib_buf, w);
		if (error)
			goto done;
		sib_info = sib_buf->b_addr;
		if (unlikely(
		    be32_to_cpu(sib_info->back) != last_blkno ||
		    sib_info->magic != dead_info->magic)) {
			XFS_ERROR_REPORT("xfs_da_swap_lastblock(3)",
					 XFS_ERRLEVEL_LOW, mp);
			error = XFS_ERROR(EFSCORRUPTED);
			goto done;
		}
		sib_info->back = cpu_to_be32(dead_blkno);
		xfs_trans_log_buf(tp, sib_buf,
			XFS_DA_LOGRANGE(sib_info, &sib_info->back,
					sizeof(sib_info->back)));
		sib_buf = NULL;
	}
	par_blkno = mp->m_dirleafblk;
	level = -1;
	/*
	 * Walk down the tree looking for the parent of the moved block.
	 */
	for (;;) {
		error = xfs_da3_node_read(tp, ip, par_blkno, -1, &par_buf, w);
		if (error)
			goto done;
		par_node = par_buf->b_addr;
		xfs_da3_node_hdr_from_disk(&par_hdr, par_node);
		if (level >= 0 && level != par_hdr.level + 1) {
			XFS_ERROR_REPORT("xfs_da_swap_lastblock(4)",
					 XFS_ERRLEVEL_LOW, mp);
			error = XFS_ERROR(EFSCORRUPTED);
			goto done;
		}
		level = par_hdr.level;
		btree = xfs_da3_node_tree_p(par_node);
		for (entno = 0;
		     entno < par_hdr.count &&
		     be32_to_cpu(btree[entno].hashval) < dead_hash;
		     entno++)
			continue;
		if (entno == par_hdr.count) {
			XFS_ERROR_REPORT("xfs_da_swap_lastblock(5)",
					 XFS_ERRLEVEL_LOW, mp);
			error = XFS_ERROR(EFSCORRUPTED);
			goto done;
		}
		par_blkno = be32_to_cpu(btree[entno].before);
		if (level == dead_level + 1)
			break;
		xfs_trans_brelse(tp, par_buf);
		par_buf = NULL;
	}
	/*
	 * We're in the right parent block.
	 * Look for the right entry.
	 */
	for (;;) {
		for (;
		     entno < par_hdr.count &&
		     be32_to_cpu(btree[entno].before) != last_blkno;
		     entno++)
			continue;
		if (entno < par_hdr.count)
			break;
		par_blkno = par_hdr.forw;
		xfs_trans_brelse(tp, par_buf);
		par_buf = NULL;
		if (unlikely(par_blkno == 0)) {
			XFS_ERROR_REPORT("xfs_da_swap_lastblock(6)",
					 XFS_ERRLEVEL_LOW, mp);
			error = XFS_ERROR(EFSCORRUPTED);
			goto done;
		}
		error = xfs_da3_node_read(tp, ip, par_blkno, -1, &par_buf, w);
		if (error)
			goto done;
		par_node = par_buf->b_addr;
		xfs_da3_node_hdr_from_disk(&par_hdr, par_node);
		if (par_hdr.level != level) {
			XFS_ERROR_REPORT("xfs_da_swap_lastblock(7)",
					 XFS_ERRLEVEL_LOW, mp);
			error = XFS_ERROR(EFSCORRUPTED);
			goto done;
		}
		btree = xfs_da3_node_tree_p(par_node);
		entno = 0;
	}
	/*
	 * Update the parent entry pointing to the moved block.
	 */
	btree[entno].before = cpu_to_be32(dead_blkno);
	xfs_trans_log_buf(tp, par_buf,
		XFS_DA_LOGRANGE(par_node, &btree[entno].before,
				sizeof(btree[entno].before)));
	*dead_blknop = last_blkno;
	*dead_bufp = last_buf;
	return 0;
done:
	if (par_buf)
		xfs_trans_brelse(tp, par_buf);
	if (sib_buf)
		xfs_trans_brelse(tp, sib_buf);
	xfs_trans_brelse(tp, last_buf);
	return error;
}
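
/*
 * Illustrative sketch of the swap above (illustrative only).  With D the
 * block being freed and L the last block in the dabtree space, the function
 * rewrites
 *
 *	back <-> L <-> forw		parent entry:  ... -> L
 * into
 *	back <-> D <-> forw		parent entry:  ... -> D
 *
 * by copying L's contents over D, repointing both siblings and the parent
 * entry at D, and handing L's block number and buffer back to the caller so
 * that the block actually unmapped is the last one in the address space.
 */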
/*
 * Remove a btree block from a directory or attribute.
 */
int
xfs_da_shrink_inode(
	xfs_da_args_t	*args,
	xfs_dablk_t	dead_blkno,
	struct xfs_buf	*dead_buf)
{
	xfs_inode_t *dp;
	int done, error, w, count;
	xfs_trans_t *tp;
	xfs_mount_t *mp;

	trace_xfs_da_shrink_inode(args);

	dp = args->dp;
	w = args->whichfork;
	tp = args->trans;
	mp = dp->i_mount;
	if (w == XFS_DATA_FORK)
		count = mp->m_dirblkfsbs;
	else
		count = 1;
	for (;;) {
		/*
		 * Remove extents.  If we get ENOSPC for a dir we have to move
		 * the last block to the place we want to kill.
		 */
		error = xfs_bunmapi(tp, dp, dead_blkno, count,
				    xfs_bmapi_aflag(w)|XFS_BMAPI_METADATA,
				    0, args->firstblock, args->flist, &done);
		if (error == ENOSPC) {
			if (w != XFS_DATA_FORK)
				break;
			error = xfs_da3_swap_lastblock(args, &dead_blkno,
						      &dead_buf);
			if (error)
				break;
		} else {
			break;
		}
	}
	xfs_trans_binval(tp, dead_buf);
	return error;
}
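
/*
 * Illustrative flow for the ENOSPC retry above (illustrative only): if
 * xfs_bunmapi() reports ENOSPC while removing a directory block, the last
 * block in the dabtree space is swapped into the dead block's position and
 * the unmap is retried against what is now the last block:
 *
 *	xfs_da_shrink_inode(args, blkno, bp)
 *		xfs_bunmapi(...)			returns ENOSPC
 *		xfs_da3_swap_lastblock(args, &blkno, &bp)
 *		xfs_bunmapi(...)			frees the (new) last block
 */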
/*
 * See if the mapping(s) for this btree block are valid, i.e.
 * don't contain holes, are logically contiguous, and cover the whole range.
 */
STATIC int
xfs_da_map_covers_blocks(
	int		nmap,
	xfs_bmbt_irec_t	*mapp,
	xfs_dablk_t	bno,
	int		count)
{
	int		i;
	int		off;

	for (i = 0, off = bno; i < nmap; i++) {
		if (mapp[i].br_startblock == HOLESTARTBLOCK ||
		    mapp[i].br_startblock == DELAYSTARTBLOCK) {
			return 0;
		}
		if (off != mapp[i].br_startoff) {
			return 0;
		}
		off += mapp[i].br_blockcount;
	}
	return off == bno + count;
}
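
/*
 * Worked example (illustrative only): for bno = 16 and count = 8 the
 * following two mappings are accepted because they are hole-free, logically
 * contiguous and cover exactly [16, 24):
 *
 *	mapp[0]: br_startoff = 16, br_blockcount = 3
 *	mapp[1]: br_startoff = 19, br_blockcount = 5
 *
 * A second mapping starting at 20, or any mapping whose br_startblock is
 * HOLESTARTBLOCK or DELAYSTARTBLOCK, would make the function return 0.
 */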
/*
 * Convert a struct xfs_bmbt_irec to a struct xfs_buf_map.
 *
 * For the single map case, it is assumed that the caller has provided a pointer
 * to a valid xfs_buf_map.  For the multiple map case, this function will
 * allocate the xfs_buf_map to hold all the maps and replace the caller's single
 * map pointer with the allocated map.
 */
static int
xfs_buf_map_from_irec(
	struct xfs_mount	*mp,
	struct xfs_buf_map	**mapp,
	int			*nmaps,
	struct xfs_bmbt_irec	*irecs,
	int			nirecs)
{
	struct xfs_buf_map	*map;
	int			i;

	ASSERT(*nmaps == 1);
	ASSERT(nirecs >= 1);

	if (nirecs > 1) {
		map = kmem_zalloc(nirecs * sizeof(struct xfs_buf_map),
				  KM_SLEEP | KM_NOFS);
		if (!map)
			return ENOMEM;
		*mapp = map;
	}

	*nmaps = nirecs;
	map = *mapp;
	for (i = 0; i < *nmaps; i++) {
		ASSERT(irecs[i].br_startblock != DELAYSTARTBLOCK &&
		       irecs[i].br_startblock != HOLESTARTBLOCK);
		map[i].bm_bn = XFS_FSB_TO_DADDR(mp, irecs[i].br_startblock);
		map[i].bm_len = XFS_FSB_TO_BB(mp, irecs[i].br_blockcount);
	}

	return 0;
}
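
/*
 * Worked example (illustrative; the numbers assume a 4k filesystem block
 * size, i.e. 8 basic blocks per filesystem block):
 *
 *	irec:	br_startblock = 100,  br_blockcount = 2
 *	map:	bm_bn  = XFS_FSB_TO_DADDR(mp, 100)
 *		bm_len = XFS_FSB_TO_BB(mp, 2) = 16
 *
 * With nirecs == 1 the caller's on-stack map is filled in directly; only a
 * multi-extent dir block triggers the array allocation above.
 */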
/*
 * Map the block we are given ready for reading. There are three possible return
 * values:
 *	-1 - will be returned if we land in a hole and mappedbno == -2 so the
 *	     caller knows not to execute a subsequent read.
 *	 0 - if we mapped the block successfully
 *	>0 - positive error number if there was an error.
 */
static int
xfs_dabuf_map(
	struct xfs_trans	*trans,
	struct xfs_inode	*dp,
	xfs_dablk_t		bno,
	xfs_daddr_t		mappedbno,
	int			whichfork,
	struct xfs_buf_map	**map,
	int			*nmaps)
{
	struct xfs_mount	*mp = dp->i_mount;
	int			nfsb;
	int			error = 0;
	struct xfs_bmbt_irec	irec;
	struct xfs_bmbt_irec	*irecs = &irec;
	int			nirecs;

	ASSERT(map && *map);
	ASSERT(*nmaps == 1);

	nfsb = (whichfork == XFS_DATA_FORK) ? mp->m_dirblkfsbs : 1;

	/*
	 * Caller doesn't have a mapping.  -2 means don't complain
	 * if we land in a hole.
	 */
	if (mappedbno == -1 || mappedbno == -2) {
		/*
		 * Optimize the one-block case.
		 */
		if (nfsb != 1)
			irecs = kmem_zalloc(sizeof(irec) * nfsb,
					    KM_SLEEP | KM_NOFS);

		nirecs = nfsb;
		error = xfs_bmapi_read(dp, (xfs_fileoff_t)bno, nfsb, irecs,
				       &nirecs, xfs_bmapi_aflag(whichfork));
		if (error)
			goto out;
	} else {
		irecs->br_startblock = XFS_DADDR_TO_FSB(mp, mappedbno);
		irecs->br_startoff = (xfs_fileoff_t)bno;
		irecs->br_blockcount = nfsb;
		irecs->br_state = 0;
		nirecs = 1;
	}

	if (!xfs_da_map_covers_blocks(nirecs, irecs, bno, nfsb)) {
		error = mappedbno == -2 ? -1 : XFS_ERROR(EFSCORRUPTED);
		if (unlikely(error == EFSCORRUPTED)) {
			if (xfs_error_level >= XFS_ERRLEVEL_LOW) {
				int i;
				xfs_alert(mp, "%s: bno %lld dir: inode %lld",
					__func__, (long long)bno,
					(long long)dp->i_ino);
				for (i = 0; i < *nmaps; i++) {
					xfs_alert(mp,
"[%02d] br_startoff %lld br_startblock %lld br_blockcount %lld br_state %d",
						i,
						(long long)irecs[i].br_startoff,
						(long long)irecs[i].br_startblock,
						(long long)irecs[i].br_blockcount,
						irecs[i].br_state);
				}
			}
			XFS_ERROR_REPORT("xfs_da_do_buf(1)",
					 XFS_ERRLEVEL_LOW, mp);
		}
		goto out;
	}
	error = xfs_buf_map_from_irec(mp, map, nmaps, irecs, nirecs);
out:
	if (irecs != &irec)
		kmem_free(irecs);
	return error;
}
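
/*
 * Caller-side sketch of the return convention above (illustrative only):
 *
 *	error = xfs_dabuf_map(trans, dp, bno, -2, whichfork, &mapp, &nmap);
 *	if (error == -1)
 *		return 0;		(hole, nothing to read)
 *	if (error)
 *		return error;		(positive errno)
 *	... read or readahead mapp[0..nmap-1] ...
 *
 * Passing mappedbno == -1 instead turns a hole into EFSCORRUPTED, and a real
 * disk address skips the bmapi lookup entirely.
 */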
/*
 * Get a buffer for the dir/attr block.
 */
int
xfs_da_get_buf(
	struct xfs_trans	*trans,
	struct xfs_inode	*dp,
	xfs_dablk_t		bno,
	xfs_daddr_t		mappedbno,
	struct xfs_buf		**bpp,
	int			whichfork)
{
	struct xfs_buf		*bp;
	struct xfs_buf_map	map;
	struct xfs_buf_map	*mapp;
	int			nmap;
	int			error;

	*bpp = NULL;
	mapp = &map;
	nmap = 1;
	error = xfs_dabuf_map(trans, dp, bno, mappedbno, whichfork,
				&mapp, &nmap);
	if (error) {
		/* mapping a hole is not an error, but we don't continue */
		if (error == -1)
			error = 0;
		goto out_free;
	}

	bp = xfs_trans_get_buf_map(trans, dp->i_mount->m_ddev_targp,
				    mapp, nmap, 0);
	error = bp ? bp->b_error : XFS_ERROR(EIO);
	if (error) {
		xfs_trans_brelse(trans, bp);
		goto out_free;
	}

	*bpp = bp;

out_free:
	if (mapp != &map)
		kmem_free(mapp);

	return error;
}
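
/*
 * Usage sketch (illustrative, assuming the usual grow-then-initialise
 * pattern): because this path uses xfs_trans_get_buf_map() it does not read
 * anything from disk, so it is the call to use for a freshly allocated
 * dir/attr block that is about to be written from scratch:
 *
 *	error = xfs_da_grow_inode(args, &blkno);
 *	if (!error)
 *		error = xfs_da_get_buf(tp, dp, blkno, -1, &bp, whichfork);
 */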
/*
 * Get a buffer for the dir/attr block, fill in the contents.
 */
int
xfs_da_read_buf(
	struct xfs_trans	*trans,
	struct xfs_inode	*dp,
	xfs_dablk_t		bno,
	xfs_daddr_t		mappedbno,
	struct xfs_buf		**bpp,
	int			whichfork,
	const struct xfs_buf_ops *ops)
{
	struct xfs_buf		*bp;
	struct xfs_buf_map	map;
	struct xfs_buf_map	*mapp;
	int			nmap;
	int			error;

	*bpp = NULL;
	mapp = &map;
	nmap = 1;
	error = xfs_dabuf_map(trans, dp, bno, mappedbno, whichfork,
				&mapp, &nmap);
	if (error) {
		/* mapping a hole is not an error, but we don't continue */
		if (error == -1)
			error = 0;
		goto out_free;
	}

	error = xfs_trans_read_buf_map(dp->i_mount, trans,
					dp->i_mount->m_ddev_targp,
					mapp, nmap, 0, &bp, ops);
	if (error)
		goto out_free;

	if (whichfork == XFS_ATTR_FORK)
		xfs_buf_set_ref(bp, XFS_ATTR_BTREE_REF);
	else
		xfs_buf_set_ref(bp, XFS_DIR_BTREE_REF);

	/*
	 * This verification code will be moved to a CRC verification callback
	 * function so just leave it here unchanged until then.
	 */
	{
		xfs_dir2_data_hdr_t	*hdr = bp->b_addr;
		xfs_dir2_free_t		*free = bp->b_addr;
		xfs_da_blkinfo_t	*info = bp->b_addr;
		uint			magic, magic1;
		struct xfs_mount	*mp = dp->i_mount;

		magic = be16_to_cpu(info->magic);
		magic1 = be32_to_cpu(hdr->magic);
		if (unlikely(
		    XFS_TEST_ERROR((magic != XFS_DA_NODE_MAGIC) &&
				   (magic != XFS_DA3_NODE_MAGIC) &&
				   (magic != XFS_ATTR_LEAF_MAGIC) &&
				   (magic != XFS_ATTR3_LEAF_MAGIC) &&
				   (magic != XFS_DIR2_LEAF1_MAGIC) &&
				   (magic != XFS_DIR3_LEAF1_MAGIC) &&
				   (magic != XFS_DIR2_LEAFN_MAGIC) &&
				   (magic != XFS_DIR3_LEAFN_MAGIC) &&
				   (magic1 != XFS_DIR2_BLOCK_MAGIC) &&
				   (magic1 != XFS_DIR3_BLOCK_MAGIC) &&
				   (magic1 != XFS_DIR2_DATA_MAGIC) &&
				   (magic1 != XFS_DIR3_DATA_MAGIC) &&
				   (free->hdr.magic !=
					cpu_to_be32(XFS_DIR2_FREE_MAGIC)) &&
				   (free->hdr.magic !=
					cpu_to_be32(XFS_DIR3_FREE_MAGIC)),
				mp, XFS_ERRTAG_DA_READ_BUF,
				XFS_RANDOM_DA_READ_BUF))) {
			trace_xfs_da_btree_corrupt(bp, _RET_IP_);
			XFS_CORRUPTION_ERROR("xfs_da_do_buf(2)",
					     XFS_ERRLEVEL_LOW, mp, info);
			error = XFS_ERROR(EFSCORRUPTED);
			xfs_trans_brelse(trans, bp);
			goto out_free;
		}
	}
	*bpp = bp;
out_free:
	if (mapp != &map)
		kmem_free(mapp);

	return error;
}
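
/*
 * Caller sketch (illustrative only; the verifier chosen here is just an
 * example): read a dabtree block through the transaction, letting
 * xfs_dabuf_map() resolve the disk address and attaching a read verifier:
 *
 *	struct xfs_buf	*bp;
 *	int		error;
 *
 *	error = xfs_da_read_buf(tp, dp, blkno, -1, &bp, XFS_DATA_FORK,
 *				&xfs_da3_node_buf_ops);
 *	if (error)
 *		return error;
 *	... use bp->b_addr, then xfs_trans_brelse(tp, bp) if read-only ...
 */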
/*
 * Readahead the dir/attr block.
 */
xfs_daddr_t
xfs_da_reada_buf(
	struct xfs_trans	*trans,
	struct xfs_inode	*dp,
	xfs_dablk_t		bno,
	xfs_daddr_t		mappedbno,
	int			whichfork,
	const struct xfs_buf_ops *ops)
{
	struct xfs_buf_map	map;
	struct xfs_buf_map	*mapp;
	int			nmap;
	int			error;

	mapp = &map;
	nmap = 1;
	error = xfs_dabuf_map(trans, dp, bno, mappedbno, whichfork,
				&mapp, &nmap);
	if (error) {
		/* mapping a hole is not an error, but we don't continue */
		if (error == -1)
			error = 0;
		goto out_free;
	}

	mappedbno = mapp[0].bm_bn;
	xfs_buf_readahead_map(dp->i_mount->m_ddev_targp, mapp, nmap, ops);