/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_da_format.h"
#include "xfs_da_btree.h"
#include "xfs_dir2.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_trans.h"
#include "xfs_inode_item.h"
#include "xfs_extfree_item.h"
#include "xfs_alloc.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_bmap_btree.h"
#include "xfs_rtalloc.h"
#include "xfs_error.h"
#include "xfs_quota.h"
#include "xfs_trans_space.h"
#include "xfs_buf_item.h"
#include "xfs_trace.h"
#include "xfs_symlink.h"
#include "xfs_attr_leaf.h"
#include "xfs_filestream.h"
#include "xfs_rmap.h"
#include "xfs_ag_resv.h"
#include "xfs_refcount.h"
#include "xfs_rmap_btree.h"
#include "xfs_icache.h"

kmem_zone_t		*xfs_bmap_free_item_zone;
/*
 * Miscellaneous helper functions
 */

/*
 * Compute and fill in the value of the maximum depth of a bmap btree
 * in this filesystem.  Done once, during mount.
 */
void
xfs_bmap_compute_maxlevels(
	xfs_mount_t	*mp,		/* file system mount structure */
	int		whichfork)	/* data or attr fork */
{
	int		level;		/* btree level */
	uint		maxblocks;	/* max blocks at this level */
	uint		maxleafents;	/* max leaf entries possible */
	int		maxrootrecs;	/* max records in root block */
	int		minleafrecs;	/* min records in leaf block */
	int		minnoderecs;	/* min records in node block */
	int		sz;		/* root block size */

	/*
	 * The maximum number of extents in a file, hence the maximum
	 * number of leaf entries, is controlled by the type of di_nextents
	 * (a signed 32-bit number, xfs_extnum_t), or by di_anextents
	 * (a signed 16-bit number, xfs_aextnum_t).
	 *
	 * Note that we can no longer assume that if we are in ATTR1 that
	 * the fork offset of all the inodes will be
	 * (xfs_default_attroffset(ip) >> 3) because we could have mounted
	 * with ATTR2 and then mounted back with ATTR1, keeping the
	 * di_forkoff's fixed but probably at various positions. Therefore,
	 * for both ATTR1 and ATTR2 we have to assume the worst case scenario
	 * of a minimum size available.
	 */
	if (whichfork == XFS_DATA_FORK) {
		maxleafents = MAXEXTNUM;
		sz = XFS_BMDR_SPACE_CALC(MINDBTPTRS);
	} else {
		maxleafents = MAXAEXTNUM;
		sz = XFS_BMDR_SPACE_CALC(MINABTPTRS);
	}
	maxrootrecs = xfs_bmdr_maxrecs(sz, 0);
	minleafrecs = mp->m_bmap_dmnr[0];
	minnoderecs = mp->m_bmap_dmnr[1];
	maxblocks = (maxleafents + minleafrecs - 1) / minleafrecs;
	for (level = 1; maxblocks > 1; level++) {
		if (maxblocks <= maxrootrecs)
			maxblocks = 1;
		else
			maxblocks = (maxblocks + minnoderecs - 1) / minnoderecs;
	}
	mp->m_bm_maxlevels[whichfork] = level;
}
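/*
 * For a rough sense of scale: with 4096-byte blocks a bmbt block holds on
 * the order of 250 records, so minleafrecs/minnoderecs are around 127.
 * Covering MAXEXTNUM (2^31 - 1) leaf entries then takes four block levels
 * plus the root, which is where the familiar maximum of 5 bmap btree
 * levels for the data fork comes from.
 */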
/*
 * Lookup the extent record [off, bno, len] in the btree, using EQ matching.
 */
STATIC int				/* error */
xfs_bmbt_lookup_eq(
	struct xfs_btree_cur	*cur,
	xfs_fileoff_t		off,
	xfs_fsblock_t		bno,
	xfs_filblks_t		len,
	int			*stat)	/* success/failure */
{
	cur->bc_rec.b.br_startoff = off;
	cur->bc_rec.b.br_startblock = bno;
	cur->bc_rec.b.br_blockcount = len;
	return xfs_btree_lookup(cur, XFS_LOOKUP_EQ, stat);
}
/*
 * Lookup the first extent record greater than or equal to [off, bno, len].
 */
STATIC int				/* error */
xfs_bmbt_lookup_ge(
	struct xfs_btree_cur	*cur,
	xfs_fileoff_t		off,
	xfs_fsblock_t		bno,
	xfs_filblks_t		len,
	int			*stat)	/* success/failure */
{
	cur->bc_rec.b.br_startoff = off;
	cur->bc_rec.b.br_startblock = bno;
	cur->bc_rec.b.br_blockcount = len;
	return xfs_btree_lookup(cur, XFS_LOOKUP_GE, stat);
}
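/*
 * Both helpers above stage the search key in the cursor's own record
 * (cur->bc_rec.b) and defer to the generic btree code.  The usual calling
 * pattern, as seen throughout this file, is:
 *
 *	error = xfs_bmbt_lookup_eq(cur, off, bno, len, &i);
 *	if (error)
 *		goto done;
 *	XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
 *
 * where i == 1 means an exactly matching record was found.
 */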
/*
 * Check if the inode needs to be converted to btree format.
 */
static inline bool xfs_bmap_needs_btree(struct xfs_inode *ip, int whichfork)
{
	return whichfork != XFS_COW_FORK &&
		XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS &&
		XFS_IFORK_NEXTENTS(ip, whichfork) >
			XFS_IFORK_MAXEXT(ip, whichfork);
}
/*
 * Check if the inode should be converted to extent format.
 */
static inline bool xfs_bmap_wants_extents(struct xfs_inode *ip, int whichfork)
{
	return whichfork != XFS_COW_FORK &&
		XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE &&
		XFS_IFORK_NEXTENTS(ip, whichfork) <=
			XFS_IFORK_MAXEXT(ip, whichfork);
}
/*
 * Update the record referred to by cur to the value given
 * by [off, bno, len, state].
 * This either works (return 0) or gets an EFSCORRUPTED error.
 */
STATIC int
xfs_bmbt_update(
	struct xfs_btree_cur	*cur,
	xfs_fileoff_t		off,
	xfs_fsblock_t		bno,
	xfs_filblks_t		len,
	xfs_exntst_t		state)
{
	union xfs_btree_rec	rec;

	xfs_bmbt_disk_set_allf(&rec.bmbt, off, bno, len, state);
	return xfs_btree_update(cur, &rec);
}
/*
 * Compute the worst-case number of indirect blocks that will be used
 * for ip's delayed extent of length "len".
 */
STATIC xfs_filblks_t
xfs_bmap_worst_indlen(
	xfs_inode_t	*ip,		/* incore inode pointer */
	xfs_filblks_t	len)		/* delayed extent length */
{
	int		level;		/* btree level number */
	int		maxrecs;	/* maximum record count at this level */
	xfs_mount_t	*mp;		/* mount structure */
	xfs_filblks_t	rval;		/* return value */
	xfs_filblks_t	orig_len;

	mp = ip->i_mount;

	/* Calculate the worst-case size of the bmbt. */
	orig_len = len;
	maxrecs = mp->m_bmap_dmxr[0];
	for (level = 0, rval = 0;
	     level < XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK);
	     level++) {
		len += maxrecs - 1;
		do_div(len, maxrecs);
		rval += len;
		if (len == 1) {
			rval += XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK) -
				level - 1;
			break;
		}
		if (level == 0)
			maxrecs = mp->m_bmap_dmxr[1];
	}

	/* Calculate the worst-case size of the rmapbt. */
	if (xfs_sb_version_hasrmapbt(&mp->m_sb))
		rval += 1 + xfs_rmapbt_calc_size(mp, orig_len) +
				mp->m_rmap_maxlevels;

	return rval;
}
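/*
 * Intuition for the loop above: each pass converts a count of blocks at
 * one level into the count of btree blocks needed to index them one level
 * up, so a short delayed extent ends up reserving roughly one indirect
 * block per possible btree level.  The extra rmapbt term only applies on
 * filesystems built with the reverse-mapping feature.
 */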
/*
 * Calculate the default attribute fork offset for newly created inodes.
 */
STATIC uint
xfs_default_attroffset(
	struct xfs_inode	*ip)
{
	struct xfs_mount	*mp = ip->i_mount;
	uint			offset;

	if (mp->m_sb.sb_inodesize == 256) {
		offset = XFS_LITINO(mp, ip->i_d.di_version) -
				XFS_BMDR_SPACE_CALC(MINABTPTRS);
	} else {
		offset = XFS_BMDR_SPACE_CALC(6 * MINABTPTRS);
	}

	ASSERT(offset < XFS_LITINO(mp, ip->i_d.di_version));
	return offset;
}
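/*
 * In effect, 256 byte inodes push the attribute fork out as far as the
 * literal area allows, leaving just enough room for a minimal attr btree
 * root, while larger inodes use a fixed offset that reserves space in the
 * data fork for several minimal btree root pointers.  The ASSERT above
 * guarantees the chosen offset stays inside the inode literal area.
 */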
/*
 * Helper routine to reset inode di_forkoff field when switching
 * attribute fork from local to extent format - we reset it where
 * possible to make space available for inline data fork extents.
 */
STATIC void
xfs_bmap_forkoff_reset(
	xfs_inode_t	*ip,
	int		whichfork)
{
	if (whichfork == XFS_ATTR_FORK &&
	    ip->i_d.di_format != XFS_DINODE_FMT_DEV &&
	    ip->i_d.di_format != XFS_DINODE_FMT_UUID &&
	    ip->i_d.di_format != XFS_DINODE_FMT_BTREE) {
		uint	dfl_forkoff = xfs_default_attroffset(ip) >> 3;

		if (dfl_forkoff > ip->i_d.di_forkoff)
			ip->i_d.di_forkoff = dfl_forkoff;
	}
}
#ifdef DEBUG
STATIC struct xfs_buf *
xfs_bmap_get_bp(
	struct xfs_btree_cur	*cur,
	xfs_fsblock_t		bno)
{
	struct xfs_log_item_desc *lidp;
	int			i;

	if (!cur)
		return NULL;

	for (i = 0; i < XFS_BTREE_MAXLEVELS; i++) {
		if (!cur->bc_bufs[i])
			break;
		if (XFS_BUF_ADDR(cur->bc_bufs[i]) == bno)
			return cur->bc_bufs[i];
	}

	/* Chase down all the log items to see if the bp is there */
	list_for_each_entry(lidp, &cur->bc_tp->t_items, lid_trans) {
		struct xfs_buf_log_item	*bip;
		bip = (struct xfs_buf_log_item *)lidp->lid_item;
		if (bip->bli_item.li_type == XFS_LI_BUF &&
		    XFS_BUF_ADDR(bip->bli_buf) == bno)
			return bip->bli_buf;
	}

	return NULL;
}
STATIC void
xfs_check_block(
	struct xfs_btree_block	*block,
	xfs_mount_t		*mp,
	int			root,
	short			sz)
{
	int			i, j, dmxr;
	__be64			*pp, *thispa;	/* pointer to block address */
	xfs_bmbt_key_t		*prevp, *keyp;

	ASSERT(be16_to_cpu(block->bb_level) > 0);

	prevp = NULL;
	for (i = 1; i <= xfs_btree_get_numrecs(block); i++) {
		dmxr = mp->m_bmap_dmxr[0];
		keyp = XFS_BMBT_KEY_ADDR(mp, block, i);

		if (prevp) {
			ASSERT(be64_to_cpu(prevp->br_startoff) <
			       be64_to_cpu(keyp->br_startoff));
		}
		prevp = keyp;

		/*
		 * Compare the block numbers to see if there are dups.
		 */
		if (root)
			pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, i, sz);
		else
			pp = XFS_BMBT_PTR_ADDR(mp, block, i, dmxr);

		for (j = i+1; j <= be16_to_cpu(block->bb_numrecs); j++) {
			if (root)
				thispa = XFS_BMAP_BROOT_PTR_ADDR(mp, block, j, sz);
			else
				thispa = XFS_BMBT_PTR_ADDR(mp, block, j, dmxr);
			if (*thispa == *pp) {
				xfs_warn(mp, "%s: thispa(%d) == pp(%d) %Ld",
					__func__, j, i,
					(unsigned long long)be64_to_cpu(*thispa));
				panic("%s: ptrs are equal in node\n",
					__func__);
			}
		}
	}
}
/*
 * Check that the extents for the inode ip are in the right order in all
 * btree leaves. This becomes prohibitively expensive for large extent count
 * files, so don't bother with inodes that have more than 10,000 extents in
 * them. The btree record ordering checks will still be done, so for such large
 * bmapbt constructs that is going to catch most corruptions.
 */
STATIC void
xfs_bmap_check_leaf_extents(
	xfs_btree_cur_t		*cur,	/* btree cursor or null */
	xfs_inode_t		*ip,	/* incore inode pointer */
	int			whichfork)	/* data or attr fork */
{
	struct xfs_btree_block	*block;	/* current btree block */
	xfs_fsblock_t		bno;	/* block # of "block" */
	xfs_buf_t		*bp;	/* buffer for "block" */
	int			error;	/* error return value */
	xfs_extnum_t		i=0, j;	/* index into the extents list */
	xfs_ifork_t		*ifp;	/* fork structure */
	int			level;	/* btree level, for checking */
	xfs_mount_t		*mp;	/* file system mount structure */
	__be64			*pp;	/* pointer to block address */
	xfs_bmbt_rec_t		*ep;	/* pointer to current extent */
	xfs_bmbt_rec_t		last = {0, 0}; /* last extent in prev block */
	xfs_bmbt_rec_t		*nextp;	/* pointer to next extent */
	int			bp_release = 0;

	if (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE) {
		return;
	}

	/* skip large extent count inodes */
	if (ip->i_d.di_nextents > 10000)
		return;

	bno = NULLFSBLOCK;
	mp = ip->i_mount;
	ifp = XFS_IFORK_PTR(ip, whichfork);
	block = ifp->if_broot;
	/*
	 * Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out.
	 */
	level = be16_to_cpu(block->bb_level);
	ASSERT(level > 0);
	xfs_check_block(block, mp, 1, ifp->if_broot_bytes);
	pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, 1, ifp->if_broot_bytes);
	bno = be64_to_cpu(*pp);

	ASSERT(bno != NULLFSBLOCK);
	ASSERT(XFS_FSB_TO_AGNO(mp, bno) < mp->m_sb.sb_agcount);
	ASSERT(XFS_FSB_TO_AGBNO(mp, bno) < mp->m_sb.sb_agblocks);

	/*
	 * Go down the tree until leaf level is reached, following the first
	 * pointer (leftmost) at each level.
	 */
	while (level-- > 0) {
		/* See if buf is in cur first */
		bp_release = 0;
		bp = xfs_bmap_get_bp(cur, XFS_FSB_TO_DADDR(mp, bno));
		if (!bp) {
			bp_release = 1;
			error = xfs_btree_read_bufl(mp, NULL, bno, 0, &bp,
						XFS_BMAP_BTREE_REF,
						&xfs_bmbt_buf_ops);
			if (error)
				goto error_norelse;
		}
		block = XFS_BUF_TO_BLOCK(bp);
		if (level == 0)
			break;

		/*
		 * Check this block for basic sanity (increasing keys and
		 * no duplicate blocks).
		 */

		xfs_check_block(block, mp, 0, 0);
		pp = XFS_BMBT_PTR_ADDR(mp, block, 1, mp->m_bmap_dmxr[1]);
		bno = be64_to_cpu(*pp);
		XFS_WANT_CORRUPTED_GOTO(mp,
			XFS_FSB_SANITY_CHECK(mp, bno), error0);
		if (bp_release) {
			bp_release = 0;
			xfs_trans_brelse(NULL, bp);
		}
	}

	/*
	 * Here with bp and block set to the leftmost leaf node in the tree.
	 */
	i = 0;

	/*
	 * Loop over all leaf nodes checking that all extents are in the right order.
	 */
	for (;;) {
		xfs_fsblock_t	nextbno;
		xfs_extnum_t	num_recs;

		num_recs = xfs_btree_get_numrecs(block);

		/*
		 * Read-ahead the next leaf block, if any.
		 */
		nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib);

		/*
		 * Check all the extents to make sure they are OK.
		 * If we had a previous block, the last entry should
		 * conform with the first entry in this one.
		 */

		ep = XFS_BMBT_REC_ADDR(mp, block, 1);
		if (i) {
			ASSERT(xfs_bmbt_disk_get_startoff(&last) +
			       xfs_bmbt_disk_get_blockcount(&last) <=
			       xfs_bmbt_disk_get_startoff(ep));
		}
		for (j = 1; j < num_recs; j++) {
			nextp = XFS_BMBT_REC_ADDR(mp, block, j + 1);
			ASSERT(xfs_bmbt_disk_get_startoff(ep) +
			       xfs_bmbt_disk_get_blockcount(ep) <=
			       xfs_bmbt_disk_get_startoff(nextp));
			ep = nextp;
		}

		last = *ep;
		i += num_recs;
		if (bp_release) {
			bp_release = 0;
			xfs_trans_brelse(NULL, bp);
		}
		bno = nextbno;
		/*
		 * If we've reached the end, stop.
		 */
		if (bno == NULLFSBLOCK)
			break;

		bp_release = 0;
		bp = xfs_bmap_get_bp(cur, XFS_FSB_TO_DADDR(mp, bno));
		if (!bp) {
			bp_release = 1;
			error = xfs_btree_read_bufl(mp, NULL, bno, 0, &bp,
						XFS_BMAP_BTREE_REF,
						&xfs_bmbt_buf_ops);
			if (error)
				goto error_norelse;
		}
		block = XFS_BUF_TO_BLOCK(bp);
	}

	return;

error0:
	xfs_warn(mp, "%s: at error0", __func__);
	if (bp_release)
		xfs_trans_brelse(NULL, bp);
error_norelse:
	xfs_warn(mp, "%s: BAD after btree leaves for %d extents",
		__func__, i);
	panic("%s: CORRUPTED BTREE OR SOMETHING", __func__);
	return;
}
/*
 * Add bmap trace insert entries for all the contents of the extent records.
 */
void
xfs_bmap_trace_exlist(
	xfs_inode_t	*ip,		/* incore inode pointer */
	xfs_extnum_t	cnt,		/* count of entries in the list */
	int		whichfork,	/* data or attr or cow fork */
	unsigned long	caller_ip)
{
	xfs_extnum_t	idx;		/* extent record index */
	xfs_ifork_t	*ifp;		/* inode fork pointer */
	int		state = 0;

	if (whichfork == XFS_ATTR_FORK)
		state |= BMAP_ATTRFORK;
	else if (whichfork == XFS_COW_FORK)
		state |= BMAP_COWFORK;

	ifp = XFS_IFORK_PTR(ip, whichfork);
	ASSERT(cnt == xfs_iext_count(ifp));
	for (idx = 0; idx < cnt; idx++)
		trace_xfs_extlist(ip, idx, state, caller_ip);
}
/*
 * Validate that the bmbt_irecs being returned from bmapi are valid
 * given the caller's original parameters.  Specifically check the
 * ranges of the returned irecs to ensure that they only extend beyond
 * the given parameters if the XFS_BMAPI_ENTIRE flag was set.
 */
STATIC void
xfs_bmap_validate_ret(
	xfs_fileoff_t		bno,
	xfs_filblks_t		len,
	int			flags,
	xfs_bmbt_irec_t		*mval,
	int			nmap,
	int			ret_nmap)
{
	int			i;	/* index to map values */

	ASSERT(ret_nmap <= nmap);

	for (i = 0; i < ret_nmap; i++) {
		ASSERT(mval[i].br_blockcount > 0);
		if (!(flags & XFS_BMAPI_ENTIRE)) {
			ASSERT(mval[i].br_startoff >= bno);
			ASSERT(mval[i].br_blockcount <= len);
			ASSERT(mval[i].br_startoff + mval[i].br_blockcount <=
			       bno + len);
		} else {
			ASSERT(mval[i].br_startoff < bno + len);
			ASSERT(mval[i].br_startoff + mval[i].br_blockcount >
			       bno);
		}
		ASSERT(i == 0 ||
		       mval[i - 1].br_startoff + mval[i - 1].br_blockcount ==
		       mval[i].br_startoff);
		ASSERT(mval[i].br_startblock != DELAYSTARTBLOCK &&
		       mval[i].br_startblock != HOLESTARTBLOCK);
		ASSERT(mval[i].br_state == XFS_EXT_NORM ||
		       mval[i].br_state == XFS_EXT_UNWRITTEN);
	}
}
#else
#define xfs_bmap_check_leaf_extents(cur, ip, whichfork)		do { } while (0)
#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
#endif /* DEBUG */
/*
 * bmap free list manipulation functions
 */

/*
 * Add the extent to the list of extents to be freed at transaction end.
 * The list is maintained sorted (by block number).
 */
void
xfs_bmap_add_free(
	struct xfs_mount		*mp,
	struct xfs_defer_ops		*dfops,
	xfs_fsblock_t			bno,
	xfs_filblks_t			len,
	struct xfs_owner_info		*oinfo)
{
	struct xfs_extent_free_item	*new;	/* new element */
#ifdef DEBUG
	xfs_agnumber_t			agno;
	xfs_agblock_t			agbno;

	ASSERT(bno != NULLFSBLOCK);
	ASSERT(len > 0);
	ASSERT(len <= MAXEXTLEN);
	ASSERT(!isnullstartblock(bno));
	agno = XFS_FSB_TO_AGNO(mp, bno);
	agbno = XFS_FSB_TO_AGBNO(mp, bno);
	ASSERT(agno < mp->m_sb.sb_agcount);
	ASSERT(agbno < mp->m_sb.sb_agblocks);
	ASSERT(len < mp->m_sb.sb_agblocks);
	ASSERT(agbno + len <= mp->m_sb.sb_agblocks);
#endif
	ASSERT(xfs_bmap_free_item_zone != NULL);

	new = kmem_zone_alloc(xfs_bmap_free_item_zone, KM_SLEEP);
	new->xefi_startblock = bno;
	new->xefi_blockcount = (xfs_extlen_t)len;
	if (oinfo)
		new->xefi_oinfo = *oinfo;
	else
		xfs_rmap_skip_owner_update(&new->xefi_oinfo);
	trace_xfs_bmap_free_defer(mp, XFS_FSB_TO_AGNO(mp, bno), 0,
			XFS_FSB_TO_AGBNO(mp, bno), len);
	xfs_defer_add(dfops, XFS_DEFER_OPS_TYPE_FREE, &new->xefi_list);
}
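/*
 * Note that nothing is freed here; the extent is only queued.  Callers in
 * this file follow the pattern
 *
 *	xfs_rmap_ino_bmbt_owner(&oinfo, ip->i_ino, whichfork);
 *	xfs_bmap_add_free(mp, dfops, cbno, 1, &oinfo);
 *
 * and the actual free (plus any rmap update) happens when the deferred
 * operations are completed via xfs_defer_finish().
 */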
/*
 * Inode fork format manipulation functions
 */
/*
 * Transform a btree format file with only one leaf node, where the
 * extents list will fit in the inode, into an extents format file.
 * Since the file extents are already in-core, all we have to do is
 * give up the space for the btree root and pitch the leaf block.
 */
STATIC int				/* error */
xfs_bmap_btree_to_extents(
	xfs_trans_t		*tp,	/* transaction pointer */
	xfs_inode_t		*ip,	/* incore inode pointer */
	xfs_btree_cur_t		*cur,	/* btree cursor */
	int			*logflagsp, /* inode logging flags */
	int			whichfork)  /* data or attr fork */
{
	/* REFERENCED */
	struct xfs_btree_block	*cblock;/* child btree block */
	xfs_fsblock_t		cbno;	/* child block number */
	xfs_buf_t		*cbp;	/* child block's buffer */
	int			error;	/* error return value */
	xfs_ifork_t		*ifp;	/* inode fork data */
	xfs_mount_t		*mp;	/* mount point structure */
	__be64			*pp;	/* ptr to block address */
	struct xfs_btree_block	*rblock;/* root btree block */
	struct xfs_owner_info	oinfo;

	mp = ip->i_mount;
	ifp = XFS_IFORK_PTR(ip, whichfork);
	ASSERT(whichfork != XFS_COW_FORK);
	ASSERT(ifp->if_flags & XFS_IFEXTENTS);
	ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE);
	rblock = ifp->if_broot;
	ASSERT(be16_to_cpu(rblock->bb_level) == 1);
	ASSERT(be16_to_cpu(rblock->bb_numrecs) == 1);
	ASSERT(xfs_bmbt_maxrecs(mp, ifp->if_broot_bytes, 0) == 1);
	pp = XFS_BMAP_BROOT_PTR_ADDR(mp, rblock, 1, ifp->if_broot_bytes);
	cbno = be64_to_cpu(*pp);
	*logflagsp = 0;
#ifdef DEBUG
	if ((error = xfs_btree_check_lptr(cur, cbno, 1)))
		return error;
#endif
	error = xfs_btree_read_bufl(mp, tp, cbno, 0, &cbp, XFS_BMAP_BTREE_REF,
				&xfs_bmbt_buf_ops);
	if (error)
		return error;
	cblock = XFS_BUF_TO_BLOCK(cbp);
	if ((error = xfs_btree_check_block(cur, cblock, 0, cbp)))
		return error;
	xfs_rmap_ino_bmbt_owner(&oinfo, ip->i_ino, whichfork);
	xfs_bmap_add_free(mp, cur->bc_private.b.dfops, cbno, 1, &oinfo);
	ip->i_d.di_nblocks--;
	xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, -1L);
	xfs_trans_binval(tp, cbp);
	if (cur->bc_bufs[0] == cbp)
		cur->bc_bufs[0] = NULL;
	xfs_iroot_realloc(ip, -1, whichfork);
	ASSERT(ifp->if_broot == NULL);
	ASSERT((ifp->if_flags & XFS_IFBROOT) == 0);
	XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_EXTENTS);
	*logflagsp = XFS_ILOG_CORE | xfs_ilog_fext(whichfork);
	return 0;
}
/*
 * Convert an extents-format file into a btree-format file.
 * The new file will have a root block (in the inode) and a single child block.
 */
STATIC int					/* error */
xfs_bmap_extents_to_btree(
	xfs_trans_t		*tp,		/* transaction pointer */
	xfs_inode_t		*ip,		/* incore inode pointer */
	xfs_fsblock_t		*firstblock,	/* first-block-allocated */
	struct xfs_defer_ops	*dfops,		/* blocks freed in xaction */
	xfs_btree_cur_t		**curp,		/* cursor returned to caller */
	int			wasdel,		/* converting a delayed alloc */
	int			*logflagsp,	/* inode logging flags */
	int			whichfork)	/* data or attr fork */
{
	struct xfs_btree_block	*ablock;	/* allocated (child) bt block */
	xfs_buf_t		*abp;		/* buffer for ablock */
	xfs_alloc_arg_t		args;		/* allocation arguments */
	xfs_bmbt_rec_t		*arp;		/* child record pointer */
	struct xfs_btree_block	*block;		/* btree root block */
	xfs_btree_cur_t		*cur;		/* bmap btree cursor */
	xfs_bmbt_rec_host_t	*ep;		/* extent record pointer */
	int			error;		/* error return value */
	xfs_extnum_t		i, cnt;		/* extent record index */
	xfs_ifork_t		*ifp;		/* inode fork pointer */
	xfs_bmbt_key_t		*kp;		/* root block key pointer */
	xfs_mount_t		*mp;		/* mount structure */
	xfs_extnum_t		nextents;	/* number of file extents */
	xfs_bmbt_ptr_t		*pp;		/* root block address pointer */

	mp = ip->i_mount;
	ASSERT(whichfork != XFS_COW_FORK);
	ifp = XFS_IFORK_PTR(ip, whichfork);
	ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS);

	/*
	 * Make space in the inode incore.
	 */
	xfs_iroot_realloc(ip, 1, whichfork);
	ifp->if_flags |= XFS_IFBROOT;

	/*
	 * Fill in the root.
	 */
	block = ifp->if_broot;
	xfs_btree_init_block_int(mp, block, XFS_BUF_DADDR_NULL,
				XFS_BTNUM_BMAP, 1, 1, ip->i_ino,
				XFS_BTREE_LONG_PTRS);
	/*
	 * Need a cursor.  Can't allocate until bb_level is filled in.
	 */
	cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
	cur->bc_private.b.firstblock = *firstblock;
	cur->bc_private.b.dfops = dfops;
	cur->bc_private.b.flags = wasdel ? XFS_BTCUR_BPRV_WASDEL : 0;
	/*
	 * Convert to a btree with two levels, one record in root.
	 */
	XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_BTREE);
	memset(&args, 0, sizeof(args));
	args.tp = tp;
	args.mp = mp;
	xfs_rmap_ino_bmbt_owner(&args.oinfo, ip->i_ino, whichfork);
	args.firstblock = *firstblock;
	if (*firstblock == NULLFSBLOCK) {
		args.type = XFS_ALLOCTYPE_START_BNO;
		args.fsbno = XFS_INO_TO_FSB(mp, ip->i_ino);
	} else if (dfops->dop_low) {
		args.type = XFS_ALLOCTYPE_START_BNO;
try_another_ag:
		args.fsbno = *firstblock;
	} else {
		args.type = XFS_ALLOCTYPE_NEAR_BNO;
		args.fsbno = *firstblock;
	}
	args.minlen = args.maxlen = args.prod = 1;
	args.wasdel = wasdel;
	*logflagsp = 0;
	if ((error = xfs_alloc_vextent(&args))) {
		xfs_iroot_realloc(ip, -1, whichfork);
		xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
		return error;
	}

	/*
	 * During a CoW operation, the allocation and bmbt updates occur in
	 * different transactions.  The mapping code tries to put new bmbt
	 * blocks near extents being mapped, but the only way to guarantee this
	 * is if the alloc and the mapping happen in a single transaction that
	 * has a block reservation.  That isn't the case here, so if we run out
	 * of space we'll try again with another AG.
	 */
	if (xfs_sb_version_hasreflink(&cur->bc_mp->m_sb) &&
	    args.fsbno == NULLFSBLOCK &&
	    args.type == XFS_ALLOCTYPE_NEAR_BNO) {
		args.type = XFS_ALLOCTYPE_FIRST_AG;
		goto try_another_ag;
	}
	if (WARN_ON_ONCE(args.fsbno == NULLFSBLOCK)) {
		xfs_iroot_realloc(ip, -1, whichfork);
		xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
		return -ENOSPC;
	}
	/*
	 * Allocation can't fail, the space was reserved.
	 */
	ASSERT(*firstblock == NULLFSBLOCK ||
	       args.agno >= XFS_FSB_TO_AGNO(mp, *firstblock));
	*firstblock = cur->bc_private.b.firstblock = args.fsbno;
	cur->bc_private.b.allocated++;
	ip->i_d.di_nblocks++;
	xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, 1L);
	abp = xfs_btree_get_bufl(mp, tp, args.fsbno, 0);
	/*
	 * Fill in the child block.
	 */
	abp->b_ops = &xfs_bmbt_buf_ops;
	ablock = XFS_BUF_TO_BLOCK(abp);
	xfs_btree_init_block_int(mp, ablock, abp->b_bn,
				XFS_BTNUM_BMAP, 0, 0, ip->i_ino,
				XFS_BTREE_LONG_PTRS);

	arp = XFS_BMBT_REC_ADDR(mp, ablock, 1);
	nextents = xfs_iext_count(ifp);
	for (cnt = i = 0; i < nextents; i++) {
		ep = xfs_iext_get_ext(ifp, i);
		if (!isnullstartblock(xfs_bmbt_get_startblock(ep))) {
			arp->l0 = cpu_to_be64(ep->l0);
			arp->l1 = cpu_to_be64(ep->l1);
			arp++; cnt++;
		}
	}
	ASSERT(cnt == XFS_IFORK_NEXTENTS(ip, whichfork));
	xfs_btree_set_numrecs(ablock, cnt);

	/*
	 * Fill in the root key and pointer.
	 */
	kp = XFS_BMBT_KEY_ADDR(mp, block, 1);
	arp = XFS_BMBT_REC_ADDR(mp, ablock, 1);
	kp->br_startoff = cpu_to_be64(xfs_bmbt_disk_get_startoff(arp));
	pp = XFS_BMBT_PTR_ADDR(mp, block, 1, xfs_bmbt_get_maxrecs(cur,
						be16_to_cpu(block->bb_level)));
	*pp = cpu_to_be64(args.fsbno);

	/*
	 * Do all this logging at the end so that
	 * the root is at the right level.
	 */
	xfs_btree_log_block(cur, abp, XFS_BB_ALL_BITS);
	xfs_btree_log_recs(cur, abp, 1, be16_to_cpu(ablock->bb_numrecs));
	ASSERT(*curp == NULL);
	*curp = cur;
	*logflagsp = XFS_ILOG_CORE | xfs_ilog_fbroot(whichfork);
	return 0;
}
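/*
 * The result above is the smallest possible bmbt: a root in the inode
 * fork holding one key/pointer pair, and a single leaf block with every
 * real extent record.  Delayed allocation records (isnullstartblock())
 * are skipped when filling the leaf, which is why cnt must end up equal
 * to the fork's on-disk extent count.
 */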
/*
 * Convert a local file to an extents file.
 * This code is out of bounds for data forks of regular files,
 * since the file data needs to get logged so things will stay consistent.
 * (The bmap-level manipulations are ok, though).
 */
void
xfs_bmap_local_to_extents_empty(
	struct xfs_inode	*ip,
	int			whichfork)
{
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, whichfork);

	ASSERT(whichfork != XFS_COW_FORK);
	ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL);
	ASSERT(ifp->if_bytes == 0);
	ASSERT(XFS_IFORK_NEXTENTS(ip, whichfork) == 0);

	xfs_bmap_forkoff_reset(ip, whichfork);
	ifp->if_flags &= ~XFS_IFINLINE;
	ifp->if_flags |= XFS_IFEXTENTS;
	XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_EXTENTS);
}
STATIC int				/* error */
xfs_bmap_local_to_extents(
	xfs_trans_t	*tp,		/* transaction pointer */
	xfs_inode_t	*ip,		/* incore inode pointer */
	xfs_fsblock_t	*firstblock,	/* first block allocated in xaction */
	xfs_extlen_t	total,		/* total blocks needed by transaction */
	int		*logflagsp,	/* inode logging flags */
	int		whichfork,
	void		(*init_fn)(struct xfs_trans *tp,
				   struct xfs_buf *bp,
				   struct xfs_inode *ip,
				   struct xfs_ifork *ifp))
{
	int		error = 0;
	int		flags;		/* logging flags returned */
	xfs_ifork_t	*ifp;		/* inode fork pointer */
	xfs_alloc_arg_t	args;		/* allocation arguments */
	xfs_buf_t	*bp;		/* buffer for extent block */
	xfs_bmbt_rec_host_t *ep;	/* extent record pointer */

	/*
	 * We don't want to deal with the case of keeping inode data inline yet.
	 * So sending the data fork of a regular inode is invalid.
	 */
	ASSERT(!(S_ISREG(VFS_I(ip)->i_mode) && whichfork == XFS_DATA_FORK));
	ifp = XFS_IFORK_PTR(ip, whichfork);
	ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL);

	if (!ifp->if_bytes) {
		xfs_bmap_local_to_extents_empty(ip, whichfork);
		flags = XFS_ILOG_CORE;
		goto done;
	}

	flags = 0;
	error = 0;
	ASSERT((ifp->if_flags & (XFS_IFINLINE|XFS_IFEXTENTS|XFS_IFEXTIREC)) ==
								XFS_IFINLINE);
	memset(&args, 0, sizeof(args));
	args.tp = tp;
	args.mp = ip->i_mount;
	xfs_rmap_ino_owner(&args.oinfo, ip->i_ino, whichfork, 0);
	args.firstblock = *firstblock;
	/*
	 * Allocate a block.  We know we need only one, since the
	 * file currently fits in an inode.
	 */
	if (*firstblock == NULLFSBLOCK) {
try_another_ag:
		args.fsbno = XFS_INO_TO_FSB(args.mp, ip->i_ino);
		args.type = XFS_ALLOCTYPE_START_BNO;
	} else {
		args.fsbno = *firstblock;
		args.type = XFS_ALLOCTYPE_NEAR_BNO;
	}
	args.total = total;
	args.minlen = args.maxlen = args.prod = 1;
	error = xfs_alloc_vextent(&args);
	if (error)
		goto done;

	/*
	 * During a CoW operation, the allocation and bmbt updates occur in
	 * different transactions.  The mapping code tries to put new bmbt
	 * blocks near extents being mapped, but the only way to guarantee this
	 * is if the alloc and the mapping happen in a single transaction that
	 * has a block reservation.  That isn't the case here, so if we run out
	 * of space we'll try again with another AG.
	 */
	if (xfs_sb_version_hasreflink(&ip->i_mount->m_sb) &&
	    args.fsbno == NULLFSBLOCK &&
	    args.type == XFS_ALLOCTYPE_NEAR_BNO) {
		goto try_another_ag;
	}
	/* Can't fail, the space was reserved. */
	ASSERT(args.fsbno != NULLFSBLOCK);
	ASSERT(args.len == 1);
	*firstblock = args.fsbno;
	bp = xfs_btree_get_bufl(args.mp, tp, args.fsbno, 0);

	/*
	 * Initialize the block, copy the data and log the remote buffer.
	 *
	 * The callout is responsible for logging because the remote format
	 * might differ from the local format and thus we don't know how much
	 * to log here. Note that init_fn must also set the buffer log item
	 * type correctly.
	 */
	init_fn(tp, bp, ip, ifp);

	/* account for the change in fork size */
	xfs_idata_realloc(ip, -ifp->if_bytes, whichfork);
	xfs_bmap_local_to_extents_empty(ip, whichfork);
	flags |= XFS_ILOG_CORE;

	xfs_iext_add(ifp, 0, 1);
	ep = xfs_iext_get_ext(ifp, 0);
	xfs_bmbt_set_allf(ep, 0, args.fsbno, 1, XFS_EXT_NORM);
	trace_xfs_bmap_post_update(ip, 0,
			whichfork == XFS_ATTR_FORK ? BMAP_ATTRFORK : 0,
			_THIS_IP_);
	XFS_IFORK_NEXT_SET(ip, whichfork, 1);
	ip->i_d.di_nblocks = 1;
	xfs_trans_mod_dquot_byino(tp, ip,
		XFS_TRANS_DQ_BCOUNT, 1L);
	flags |= xfs_ilog_fext(whichfork);

done:
	*logflagsp = flags;
	return error;
}
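/*
 * The init_fn callout supplies the format-specific block initialisation.
 * For example, the symlink conversion later in this file passes
 * xfs_symlink_local_to_remote, which copies the link target into the new
 * block and logs it with the matching buffer log item type.
 */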
/*
 * Called from xfs_bmap_add_attrfork to handle btree format files.
 */
STATIC int					/* error */
xfs_bmap_add_attrfork_btree(
	xfs_trans_t		*tp,		/* transaction pointer */
	xfs_inode_t		*ip,		/* incore inode pointer */
	xfs_fsblock_t		*firstblock,	/* first block allocated */
	struct xfs_defer_ops	*dfops,		/* blocks to free at commit */
	int			*flags)		/* inode logging flags */
{
	xfs_btree_cur_t		*cur;		/* btree cursor */
	int			error;		/* error return value */
	xfs_mount_t		*mp;		/* file system mount struct */
	int			stat;		/* newroot status */

	mp = ip->i_mount;
	if (ip->i_df.if_broot_bytes <= XFS_IFORK_DSIZE(ip))
		*flags |= XFS_ILOG_DBROOT;
	else {
		cur = xfs_bmbt_init_cursor(mp, tp, ip, XFS_DATA_FORK);
		cur->bc_private.b.dfops = dfops;
		cur->bc_private.b.firstblock = *firstblock;
		if ((error = xfs_bmbt_lookup_ge(cur, 0, 0, 0, &stat)))
			goto error0;
		/* must be at least one entry */
		XFS_WANT_CORRUPTED_GOTO(mp, stat == 1, error0);
		if ((error = xfs_btree_new_iroot(cur, flags, &stat)))
			goto error0;
		if (stat == 0) {
			xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
			return -ENOSPC;
		}
		*firstblock = cur->bc_private.b.firstblock;
		cur->bc_private.b.allocated = 0;
		xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
	}
	return 0;
error0:
	xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
	return error;
}
/*
 * Called from xfs_bmap_add_attrfork to handle extents format files.
 */
STATIC int					/* error */
xfs_bmap_add_attrfork_extents(
	xfs_trans_t		*tp,		/* transaction pointer */
	xfs_inode_t		*ip,		/* incore inode pointer */
	xfs_fsblock_t		*firstblock,	/* first block allocated */
	struct xfs_defer_ops	*dfops,		/* blocks to free at commit */
	int			*flags)		/* inode logging flags */
{
	xfs_btree_cur_t		*cur;		/* bmap btree cursor */
	int			error;		/* error return value */

	if (ip->i_d.di_nextents * sizeof(xfs_bmbt_rec_t) <= XFS_IFORK_DSIZE(ip))
		return 0;
	cur = NULL;
	error = xfs_bmap_extents_to_btree(tp, ip, firstblock, dfops, &cur, 0,
		flags, XFS_DATA_FORK);
	if (cur) {
		cur->bc_private.b.allocated = 0;
		xfs_btree_del_cursor(cur,
			error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
	}
	return error;
}
/*
 * Called from xfs_bmap_add_attrfork to handle local format files. Each
 * different data fork content type needs a different callout to do the
 * conversion. Some are basic and only require special block initialisation
 * callouts for the data formatting, others (directories) are so specialised
 * they handle everything themselves.
 *
 * XXX (dgc): investigate whether directory conversion can use the generic
 * formatting callout. It should be possible - it's just a very complex
 * implementation.
 */
STATIC int					/* error */
xfs_bmap_add_attrfork_local(
	xfs_trans_t		*tp,		/* transaction pointer */
	xfs_inode_t		*ip,		/* incore inode pointer */
	xfs_fsblock_t		*firstblock,	/* first block allocated */
	struct xfs_defer_ops	*dfops,		/* blocks to free at commit */
	int			*flags)		/* inode logging flags */
{
	xfs_da_args_t		dargs;		/* args for dir/attr code */

	if (ip->i_df.if_bytes <= XFS_IFORK_DSIZE(ip))
		return 0;

	if (S_ISDIR(VFS_I(ip)->i_mode)) {
		memset(&dargs, 0, sizeof(dargs));
		dargs.geo = ip->i_mount->m_dir_geo;
		dargs.dp = ip;
		dargs.firstblock = firstblock;
		dargs.dfops = dfops;
		dargs.total = dargs.geo->fsbcount;
		dargs.whichfork = XFS_DATA_FORK;
		dargs.trans = tp;
		return xfs_dir2_sf_to_block(&dargs);
	}

	if (S_ISLNK(VFS_I(ip)->i_mode))
		return xfs_bmap_local_to_extents(tp, ip, firstblock, 1,
						 flags, XFS_DATA_FORK,
						 xfs_symlink_local_to_remote);

	/* should only be called for types that support local format data */
	ASSERT(0);
	return -EFSCORRUPTED;
}
/*
 * Convert inode from non-attributed to attributed.
 * Must not be in a transaction, ip must not be locked.
 */
int						/* error code */
xfs_bmap_add_attrfork(
	xfs_inode_t		*ip,		/* incore inode pointer */
	int			size,		/* space new attribute needs */
	int			rsvd)		/* xact may use reserved blks */
{
	xfs_fsblock_t		firstblock;	/* 1st block/ag allocated */
	struct xfs_defer_ops	dfops;		/* freed extent records */
	xfs_mount_t		*mp;		/* mount structure */
	xfs_trans_t		*tp;		/* transaction pointer */
	int			blks;		/* space reservation */
	int			version = 1;	/* superblock attr version */
	int			logflags;	/* logging flags */
	int			error;		/* error return value */

	ASSERT(XFS_IFORK_Q(ip) == 0);

	mp = ip->i_mount;
	ASSERT(!XFS_NOT_DQATTACHED(mp, ip));

	blks = XFS_ADDAFORK_SPACE_RES(mp);

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_addafork, blks, 0,
			rsvd ? XFS_TRANS_RESERVE : 0, &tp);
	if (error)
		return error;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	error = xfs_trans_reserve_quota_nblks(tp, ip, blks, 0, rsvd ?
			XFS_QMOPT_RES_REGBLKS | XFS_QMOPT_FORCE_RES :
			XFS_QMOPT_RES_REGBLKS);
	if (error)
		goto trans_cancel;
	if (XFS_IFORK_Q(ip))
		goto trans_cancel;
	if (ip->i_d.di_anextents != 0) {
		error = -EFSCORRUPTED;
		goto trans_cancel;
	}
	if (ip->i_d.di_aformat != XFS_DINODE_FMT_EXTENTS) {
		/*
		 * For inodes coming from pre-6.2 filesystems.
		 */
		ASSERT(ip->i_d.di_aformat == 0);
		ip->i_d.di_aformat = XFS_DINODE_FMT_EXTENTS;
	}

	xfs_trans_ijoin(tp, ip, 0);
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

	switch (ip->i_d.di_format) {
	case XFS_DINODE_FMT_DEV:
		ip->i_d.di_forkoff = roundup(sizeof(xfs_dev_t), 8) >> 3;
		break;
	case XFS_DINODE_FMT_UUID:
		ip->i_d.di_forkoff = roundup(sizeof(uuid_t), 8) >> 3;
		break;
	case XFS_DINODE_FMT_LOCAL:
	case XFS_DINODE_FMT_EXTENTS:
	case XFS_DINODE_FMT_BTREE:
		ip->i_d.di_forkoff = xfs_attr_shortform_bytesfit(ip, size);
		if (!ip->i_d.di_forkoff)
			ip->i_d.di_forkoff = xfs_default_attroffset(ip) >> 3;
		else if (mp->m_flags & XFS_MOUNT_ATTR2)
			version = 2;
		break;
	default:
		ASSERT(0);
		error = -EINVAL;
		goto trans_cancel;
	}

	ASSERT(ip->i_afp == NULL);
	ip->i_afp = kmem_zone_zalloc(xfs_ifork_zone, KM_SLEEP);
	ip->i_afp->if_flags = XFS_IFEXTENTS;
	logflags = 0;
	xfs_defer_init(&dfops, &firstblock);
	switch (ip->i_d.di_format) {
	case XFS_DINODE_FMT_LOCAL:
		error = xfs_bmap_add_attrfork_local(tp, ip, &firstblock, &dfops,
			&logflags);
		break;
	case XFS_DINODE_FMT_EXTENTS:
		error = xfs_bmap_add_attrfork_extents(tp, ip, &firstblock,
			&dfops, &logflags);
		break;
	case XFS_DINODE_FMT_BTREE:
		error = xfs_bmap_add_attrfork_btree(tp, ip, &firstblock, &dfops,
			&logflags);
		break;
	default:
		error = 0;
		break;
	}
	if (logflags)
		xfs_trans_log_inode(tp, ip, logflags);
	if (error)
		goto bmap_cancel;
	if (!xfs_sb_version_hasattr(&mp->m_sb) ||
	   (!xfs_sb_version_hasattr2(&mp->m_sb) && version == 2)) {
		bool log_sb = false;

		spin_lock(&mp->m_sb_lock);
		if (!xfs_sb_version_hasattr(&mp->m_sb)) {
			xfs_sb_version_addattr(&mp->m_sb);
			log_sb = true;
		}
		if (!xfs_sb_version_hasattr2(&mp->m_sb) && version == 2) {
			xfs_sb_version_addattr2(&mp->m_sb);
			log_sb = true;
		}
		spin_unlock(&mp->m_sb_lock);
		if (log_sb)
			xfs_log_sb(tp);
	}

	error = xfs_defer_finish(&tp, &dfops, NULL);
	if (error)
		goto bmap_cancel;
	error = xfs_trans_commit(tp);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;

bmap_cancel:
	xfs_defer_cancel(&dfops);
trans_cancel:
	xfs_trans_cancel(tp);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}
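/*
 * Typical call chain: xfs_attr_set() checks XFS_IFORK_Q(ip) and calls
 * xfs_bmap_add_attrfork() before setting the first attribute on an inode.
 * The whole conversion runs in its own transaction, which is why the
 * caller must not already be in one or hold the inode locked.
 */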
/*
 * Internal and external extent tree search functions.
 */
/*
 * Read in the extents to if_extents.
 * All inode fields are set up by caller, we just traverse the btree
 * and copy the records in. If the file system cannot contain unwritten
 * extents, the records are checked for no "state" flags.
 */
int					/* error */
xfs_bmap_read_extents(
	xfs_trans_t		*tp,	/* transaction pointer */
	xfs_inode_t		*ip,	/* incore inode */
	int			whichfork) /* data or attr fork */
{
	struct xfs_btree_block	*block;	/* current btree block */
	xfs_fsblock_t		bno;	/* block # of "block" */
	xfs_buf_t		*bp;	/* buffer for "block" */
	int			error;	/* error return value */
	xfs_exntfmt_t		exntf;	/* XFS_EXTFMT_NOSTATE, if checking */
	xfs_extnum_t		i, j;	/* index into the extents list */
	xfs_ifork_t		*ifp;	/* fork structure */
	int			level;	/* btree level, for checking */
	xfs_mount_t		*mp;	/* file system mount structure */
	__be64			*pp;	/* pointer to block address */
	/* REFERENCED */
	xfs_extnum_t		room;	/* number of entries there's room for */

	mp = ip->i_mount;
	ifp = XFS_IFORK_PTR(ip, whichfork);
	exntf = (whichfork != XFS_DATA_FORK) ? XFS_EXTFMT_NOSTATE :
					XFS_EXTFMT_INODE(ip);
	block = ifp->if_broot;
	/*
	 * Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out.
	 */
	level = be16_to_cpu(block->bb_level);
	ASSERT(level > 0);
	pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, 1, ifp->if_broot_bytes);
	bno = be64_to_cpu(*pp);

	/*
	 * Go down the tree until leaf level is reached, following the first
	 * pointer (leftmost) at each level.
	 */
	while (level-- > 0) {
		error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp,
				XFS_BMAP_BTREE_REF, &xfs_bmbt_buf_ops);
		if (error)
			return error;
		block = XFS_BUF_TO_BLOCK(bp);
		if (level == 0)
			break;
		pp = XFS_BMBT_PTR_ADDR(mp, block, 1, mp->m_bmap_dmxr[1]);
		bno = be64_to_cpu(*pp);
		XFS_WANT_CORRUPTED_GOTO(mp,
			XFS_FSB_SANITY_CHECK(mp, bno), error0);
		xfs_trans_brelse(tp, bp);
	}
	/*
	 * Here with bp and block set to the leftmost leaf node in the tree.
	 */
	room = xfs_iext_count(ifp);
	i = 0;
	/*
	 * Loop over all leaf nodes.  Copy information to the extent records.
	 */
	for (;;) {
		xfs_bmbt_rec_t	*frp;
		xfs_fsblock_t	nextbno;
		xfs_extnum_t	num_recs;
		xfs_extnum_t	start;

		num_recs = xfs_btree_get_numrecs(block);
		if (unlikely(i + num_recs > room)) {
			ASSERT(i + num_recs <= room);
			xfs_warn(ip->i_mount,
				"corrupt dinode %Lu, (btree extents).",
				(unsigned long long) ip->i_ino);
			XFS_CORRUPTION_ERROR("xfs_bmap_read_extents(1)",
				XFS_ERRLEVEL_LOW, ip->i_mount, block);
			goto error0;
		}
		/*
		 * Read-ahead the next leaf block, if any.
		 */
		nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib);
		if (nextbno != NULLFSBLOCK)
			xfs_btree_reada_bufl(mp, nextbno, 1,
					     &xfs_bmbt_buf_ops);
		/*
		 * Copy records into the extent records.
		 */
		frp = XFS_BMBT_REC_ADDR(mp, block, 1);
		start = i;
		for (j = 0; j < num_recs; j++, i++, frp++) {
			xfs_bmbt_rec_host_t *trp = xfs_iext_get_ext(ifp, i);
			trp->l0 = be64_to_cpu(frp->l0);
			trp->l1 = be64_to_cpu(frp->l1);
		}
		if (exntf == XFS_EXTFMT_NOSTATE) {
			/*
			 * Check all attribute bmap btree records and
			 * any "older" data bmap btree records for a
			 * set bit in the "extent flag" position.
			 */
			if (unlikely(xfs_check_nostate_extents(ifp,
					start, num_recs))) {
				XFS_ERROR_REPORT("xfs_bmap_read_extents(2)",
						 XFS_ERRLEVEL_LOW,
						 ip->i_mount);
				goto error0;
			}
		}
		xfs_trans_brelse(tp, bp);
		bno = nextbno;
		/*
		 * If we've reached the end, stop.
		 */
		if (bno == NULLFSBLOCK)
			break;
		error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp,
				XFS_BMAP_BTREE_REF, &xfs_bmbt_buf_ops);
		if (error)
			return error;
		block = XFS_BUF_TO_BLOCK(bp);
	}
	if (i != XFS_IFORK_NEXTENTS(ip, whichfork))
		return -EFSCORRUPTED;
	ASSERT(i == xfs_iext_count(ifp));
	XFS_BMAP_TRACE_EXLIST(ip, i, whichfork);
	return 0;
error0:
	xfs_trans_brelse(tp, bp);
	return -EFSCORRUPTED;
}
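/*
 * Each on-disk bmbt record is two big-endian 64-bit words (l0/l1) packing
 * the extent-flag bit, a 54-bit startoff, a 52-bit startblock and a
 * 21-bit blockcount, so copying the raw l0/l1 words as done above carries
 * the complete record into the incore extent list.
 */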
/*
 * Returns the file-relative block number of the first unused block(s)
 * in the file with at least "len" logically contiguous blocks free.
 * This is the lowest-address hole if the file has holes, else the first block
 * past the end of file.
 * Return 0 if the file is currently local (in-inode).
 */
int						/* error */
xfs_bmap_first_unused(
	xfs_trans_t	*tp,			/* transaction pointer */
	xfs_inode_t	*ip,			/* incore inode */
	xfs_extlen_t	len,			/* size of hole to find */
	xfs_fileoff_t	*first_unused,		/* unused block */
	int		whichfork)		/* data or attr fork */
{
	int		error;			/* error return value */
	int		idx;			/* extent record index */
	xfs_ifork_t	*ifp;			/* inode fork pointer */
	xfs_fileoff_t	lastaddr;		/* last block number seen */
	xfs_fileoff_t	lowest;			/* lowest useful block */
	xfs_fileoff_t	max;			/* starting useful block */
	xfs_fileoff_t	off;			/* offset for this block */
	xfs_extnum_t	nextents;		/* number of extent entries */

	ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE ||
	       XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS ||
	       XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL);
	if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL) {
		*first_unused = 0;
		return 0;
	}
	ifp = XFS_IFORK_PTR(ip, whichfork);
	if (!(ifp->if_flags & XFS_IFEXTENTS) &&
	    (error = xfs_iread_extents(tp, ip, whichfork)))
		return error;
	lowest = *first_unused;
	nextents = xfs_iext_count(ifp);
	for (idx = 0, lastaddr = 0, max = lowest; idx < nextents; idx++) {
		xfs_bmbt_rec_host_t *ep = xfs_iext_get_ext(ifp, idx);
		off = xfs_bmbt_get_startoff(ep);
		/*
		 * See if the hole before this extent will work.
		 */
		if (off >= lowest + len && off - max >= len) {
			*first_unused = max;
			return 0;
		}
		lastaddr = off + xfs_bmbt_get_blockcount(ep);
		max = XFS_FILEOFF_MAX(lastaddr, lowest);
	}
	*first_unused = max;
	return 0;
}
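/*
 * Example: with extents covering file offsets [10,20) and [50,60), a call
 * with len = 25 and *first_unused = 0 rejects the hole before offset 10
 * (only 10 blocks), accepts the 30-block hole at offset 20 (50 - 20 >= 25)
 * and returns 20.  If no hole is big enough, the first block past the
 * last extent is returned instead.
 */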
/*
 * Returns the file-relative block number of the last block - 1 before
 * last_block (input value) in the file.
 * This is not based on i_size, it is based on the extent records.
 * Returns 0 for local files, as they do not have extent records.
 */
int						/* error */
xfs_bmap_last_before(
	struct xfs_trans	*tp,		/* transaction pointer */
	struct xfs_inode	*ip,		/* incore inode */
	xfs_fileoff_t		*last_block,	/* last block */
	int			whichfork)	/* data or attr fork */
{
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, whichfork);
	struct xfs_bmbt_irec	got;
	xfs_extnum_t		idx;
	int			error;

	switch (XFS_IFORK_FORMAT(ip, whichfork)) {
	case XFS_DINODE_FMT_LOCAL:
		*last_block = 0;
		return 0;
	case XFS_DINODE_FMT_BTREE:
	case XFS_DINODE_FMT_EXTENTS:
		break;
	default:
		return -EIO;
	}

	if (!(ifp->if_flags & XFS_IFEXTENTS)) {
		error = xfs_iread_extents(tp, ip, whichfork);
		if (error)
			return error;
	}

	if (xfs_iext_lookup_extent(ip, ifp, *last_block - 1, &idx, &got)) {
		if (got.br_startoff <= *last_block - 1)
			return 0;
	}

	if (xfs_iext_get_extent(ifp, idx - 1, &got)) {
		*last_block = got.br_startoff + got.br_blockcount;
		return 0;
	}

	*last_block = 0;
	return 0;
}
STATIC int
xfs_bmap_last_extent(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	int			whichfork,
	struct xfs_bmbt_irec	*rec,
	int			*is_empty)
{
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, whichfork);
	int			error;
	int			nextents;

	if (!(ifp->if_flags & XFS_IFEXTENTS)) {
		error = xfs_iread_extents(tp, ip, whichfork);
		if (error)
			return error;
	}

	nextents = xfs_iext_count(ifp);
	if (nextents == 0) {
		*is_empty = 1;
		return 0;
	}

	xfs_bmbt_get_all(xfs_iext_get_ext(ifp, nextents - 1), rec);
	*is_empty = 0;
	return 0;
}
/*
 * Check the last inode extent to determine whether this allocation will result
 * in blocks being allocated at the end of the file. When we allocate new data
 * blocks at the end of the file which do not start at the previous data block,
 * we will try to align the new blocks at stripe unit boundaries.
 *
 * Returns 1 in bma->aeof if the file (fork) is empty as any new write will be
 * at, or past the EOF.
 */
STATIC int
xfs_bmap_isaeof(
	struct xfs_bmalloca	*bma,
	int			whichfork)
{
	struct xfs_bmbt_irec	rec;
	int			is_empty;
	int			error;

	bma->aeof = 0;
	error = xfs_bmap_last_extent(NULL, bma->ip, whichfork, &rec,
				     &is_empty);
	if (error)
		return error;

	if (is_empty) {
		bma->aeof = 1;
		return 0;
	}

	/*
	 * Check if we are allocating at or past the last extent, or at least
	 * into the last delayed allocated extent.
	 */
	bma->aeof = bma->offset >= rec.br_startoff + rec.br_blockcount ||
		(bma->offset >= rec.br_startoff &&
		 isnullstartblock(rec.br_startblock));
	return 0;
}
/*
 * Returns the file-relative block number of the first block past eof in
 * the file.  This is not based on i_size, it is based on the extent records.
 * Returns 0 for local files, as they do not have extent records.
 */
int
xfs_bmap_last_offset(
	struct xfs_inode	*ip,
	xfs_fileoff_t		*last_block,
	int			whichfork)
{
	struct xfs_bmbt_irec	rec;
	int			is_empty;
	int			error;

	*last_block = 0;

	if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL)
		return 0;

	if (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE &&
	    XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS)
		return -EIO;

	error = xfs_bmap_last_extent(NULL, ip, whichfork, &rec, &is_empty);
	if (error || is_empty)
		return error;

	*last_block = rec.br_startoff + rec.br_blockcount;
	return 0;
}
/*
 * Returns whether the selected fork of the inode has exactly one
 * block or not.  For the data fork we check this matches di_size,
 * implying the file's range is 0..bsize-1.
 */
int					/* 1=>1 block, 0=>otherwise */
xfs_bmap_one_block(
	xfs_inode_t	*ip,		/* incore inode */
	int		whichfork)	/* data or attr fork */
{
	xfs_bmbt_rec_host_t *ep;	/* ptr to fork's extent */
	xfs_ifork_t	*ifp;		/* inode fork pointer */
	int		rval;		/* return value */
	xfs_bmbt_irec_t	s;		/* internal version of extent */

#ifndef DEBUG
	if (whichfork == XFS_DATA_FORK)
		return XFS_ISIZE(ip) == ip->i_mount->m_sb.sb_blocksize;
#endif	/* !DEBUG */
	if (XFS_IFORK_NEXTENTS(ip, whichfork) != 1)
		return 0;
	if (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS)
		return 0;
	ifp = XFS_IFORK_PTR(ip, whichfork);
	ASSERT(ifp->if_flags & XFS_IFEXTENTS);
	ep = xfs_iext_get_ext(ifp, 0);
	xfs_bmbt_get_all(ep, &s);
	rval = s.br_startoff == 0 && s.br_blockcount == 1;
	if (rval && whichfork == XFS_DATA_FORK)
		ASSERT(XFS_ISIZE(ip) == ip->i_mount->m_sb.sb_blocksize);
	return rval;
}
/*
 * Extent tree manipulation functions used during allocation.
 */

/*
 * Convert a delayed allocation to a real allocation.
 */
STATIC int				/* error */
xfs_bmap_add_extent_delay_real(
	struct xfs_bmalloca	*bma,
	int			whichfork)
{
	struct xfs_bmbt_irec	*new = &bma->got;
	int			diff;	/* temp value */
	xfs_bmbt_rec_host_t	*ep;	/* extent entry for idx */
	int			error;	/* error return value */
	int			i;	/* temp state */
	xfs_ifork_t		*ifp;	/* inode fork pointer */
	xfs_fileoff_t		new_endoff;	/* end offset of new entry */
	xfs_bmbt_irec_t		r[3];	/* neighbor extent entries */
					/* left is 0, right is 1, prev is 2 */
	int			rval=0;	/* return value (logging flags) */
	int			state = 0;/* state bits, accessed thru macros */
	xfs_filblks_t		da_new; /* new count del alloc blocks used */
	xfs_filblks_t		da_old; /* old count del alloc blocks used */
	xfs_filblks_t		temp=0;	/* value for da_new calculations */
	xfs_filblks_t		temp2=0;/* value for da_new calculations */
	int			tmp_rval;	/* partial logging flags */
	struct xfs_mount	*mp;
	xfs_extnum_t		*nextents;

	mp = bma->ip->i_mount;
	ifp = XFS_IFORK_PTR(bma->ip, whichfork);
	ASSERT(whichfork != XFS_ATTR_FORK);
	nextents = (whichfork == XFS_COW_FORK ? &bma->ip->i_cnextents :
						&bma->ip->i_d.di_nextents);

	ASSERT(bma->idx >= 0);
	ASSERT(bma->idx <= xfs_iext_count(ifp));
	ASSERT(!isnullstartblock(new->br_startblock));
	ASSERT(!bma->cur ||
	       (bma->cur->bc_private.b.flags & XFS_BTCUR_BPRV_WASDEL));

	XFS_STATS_INC(mp, xs_add_exlist);

#define	LEFT		r[0]
#define	RIGHT		r[1]
#define	PREV		r[2]

	if (whichfork == XFS_COW_FORK)
		state |= BMAP_COWFORK;

	/*
	 * Set up a bunch of variables to make the tests simpler.
	 */
	ep = xfs_iext_get_ext(ifp, bma->idx);
	xfs_bmbt_get_all(ep, &PREV);
	new_endoff = new->br_startoff + new->br_blockcount;
	ASSERT(PREV.br_startoff <= new->br_startoff);
	ASSERT(PREV.br_startoff + PREV.br_blockcount >= new_endoff);

	da_old = startblockval(PREV.br_startblock);
	da_new = 0;

	/*
	 * Set flags determining what part of the previous delayed allocation
	 * extent is being replaced by a real allocation.
	 */
	if (PREV.br_startoff == new->br_startoff)
		state |= BMAP_LEFT_FILLING;
	if (PREV.br_startoff + PREV.br_blockcount == new_endoff)
		state |= BMAP_RIGHT_FILLING;

	/*
	 * Check and set flags if this segment has a left neighbor.
	 * Don't set contiguous if the combined extent would be too large.
	 */
	if (bma->idx > 0) {
		state |= BMAP_LEFT_VALID;
		xfs_bmbt_get_all(xfs_iext_get_ext(ifp, bma->idx - 1), &LEFT);

		if (isnullstartblock(LEFT.br_startblock))
			state |= BMAP_LEFT_DELAY;
	}

	if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) &&
	    LEFT.br_startoff + LEFT.br_blockcount == new->br_startoff &&
	    LEFT.br_startblock + LEFT.br_blockcount == new->br_startblock &&
	    LEFT.br_state == new->br_state &&
	    LEFT.br_blockcount + new->br_blockcount <= MAXEXTLEN)
		state |= BMAP_LEFT_CONTIG;

	/*
	 * Check and set flags if this segment has a right neighbor.
	 * Don't set contiguous if the combined extent would be too large.
	 * Also check for all-three-contiguous being too large.
	 */
	if (bma->idx < xfs_iext_count(ifp) - 1) {
		state |= BMAP_RIGHT_VALID;
		xfs_bmbt_get_all(xfs_iext_get_ext(ifp, bma->idx + 1), &RIGHT);

		if (isnullstartblock(RIGHT.br_startblock))
			state |= BMAP_RIGHT_DELAY;
	}

	if ((state & BMAP_RIGHT_VALID) && !(state & BMAP_RIGHT_DELAY) &&
	    new_endoff == RIGHT.br_startoff &&
	    new->br_startblock + new->br_blockcount == RIGHT.br_startblock &&
	    new->br_state == RIGHT.br_state &&
	    new->br_blockcount + RIGHT.br_blockcount <= MAXEXTLEN &&
	    ((state & (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
		       BMAP_RIGHT_FILLING)) !=
		      (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
		       BMAP_RIGHT_FILLING) ||
	     LEFT.br_blockcount + new->br_blockcount + RIGHT.br_blockcount
			<= MAXEXTLEN))
		state |= BMAP_RIGHT_CONTIG;

	error = 0;
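	/*
	 * At this point the four flag pairs (LEFT/RIGHT x FILLING/CONTIG)
	 * fully describe where the real allocation lands within the delayed
	 * extent and whether it merges with its neighbors.  Only a subset of
	 * combinations is possible; e.g. BMAP_LEFT_CONTIG without
	 * BMAP_LEFT_FILLING cannot happen, since the new extent would not
	 * touch the left neighbor.  The impossible ones are ASSERTed below.
	 */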
	/*
	 * Switch out based on the FILLING and CONTIG state bits.
	 */
	switch (state & (BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
			 BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG)) {
	case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
	     BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
		/*
		 * Filling in all of a previously delayed allocation extent.
		 * The left and right neighbors are both contiguous with new.
		 */
		bma->idx--;
		trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
		xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, bma->idx),
			LEFT.br_blockcount + PREV.br_blockcount +
			RIGHT.br_blockcount);
		trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);

		xfs_iext_remove(bma->ip, bma->idx + 1, 2, state);
		(*nextents)--;
		if (bma->cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
			error = xfs_bmbt_lookup_eq(bma->cur, RIGHT.br_startoff,
					RIGHT.br_startblock,
					RIGHT.br_blockcount, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
			error = xfs_btree_delete(bma->cur, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
			error = xfs_btree_decrement(bma->cur, 0, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
			error = xfs_bmbt_update(bma->cur, LEFT.br_startoff,
					LEFT.br_startblock,
					LEFT.br_blockcount +
					PREV.br_blockcount +
					RIGHT.br_blockcount, LEFT.br_state);
			if (error)
				goto done;
		}
		break;

	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
		/*
		 * Filling in all of a previously delayed allocation extent.
		 * The left neighbor is contiguous, the right is not.
		 */
		bma->idx--;

		trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
		xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, bma->idx),
			LEFT.br_blockcount + PREV.br_blockcount);
		trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);

		xfs_iext_remove(bma->ip, bma->idx + 1, 1, state);
		if (bma->cur == NULL)
			rval = XFS_ILOG_DEXT;
		else {
			rval = 0;
			error = xfs_bmbt_lookup_eq(bma->cur, LEFT.br_startoff,
					LEFT.br_startblock, LEFT.br_blockcount,
					&i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
			error = xfs_bmbt_update(bma->cur, LEFT.br_startoff,
					LEFT.br_startblock,
					LEFT.br_blockcount +
					PREV.br_blockcount, LEFT.br_state);
			if (error)
				goto done;
		}
		break;

	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
		/*
		 * Filling in all of a previously delayed allocation extent.
		 * The right neighbor is contiguous, the left is not.
		 */
		trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
		xfs_bmbt_set_startblock(ep, new->br_startblock);
		xfs_bmbt_set_blockcount(ep,
			PREV.br_blockcount + RIGHT.br_blockcount);
		trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);

		xfs_iext_remove(bma->ip, bma->idx + 1, 1, state);
		if (bma->cur == NULL)
			rval = XFS_ILOG_DEXT;
		else {
			rval = 0;
			error = xfs_bmbt_lookup_eq(bma->cur, RIGHT.br_startoff,
					RIGHT.br_startblock,
					RIGHT.br_blockcount, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
			error = xfs_bmbt_update(bma->cur, PREV.br_startoff,
					new->br_startblock,
					PREV.br_blockcount +
					RIGHT.br_blockcount, PREV.br_state);
			if (error)
				goto done;
		}
		break;

	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
		/*
		 * Filling in all of a previously delayed allocation extent.
		 * Neither the left nor right neighbors are contiguous with
		 * the new one.
		 */
		trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
		xfs_bmbt_set_startblock(ep, new->br_startblock);
		xfs_bmbt_set_state(ep, new->br_state);
		trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);

		(*nextents)++;
		if (bma->cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
			error = xfs_bmbt_lookup_eq(bma->cur, new->br_startoff,
					new->br_startblock, new->br_blockcount,
					&i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
			bma->cur->bc_rec.b.br_state = XFS_EXT_NORM;
			error = xfs_btree_insert(bma->cur, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
		}
		break;
	case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG:
		/*
		 * Filling in the first part of a previous delayed allocation.
		 * The left neighbor is contiguous.
		 */
		trace_xfs_bmap_pre_update(bma->ip, bma->idx - 1, state, _THIS_IP_);
		xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, bma->idx - 1),
			LEFT.br_blockcount + new->br_blockcount);
		xfs_bmbt_set_startoff(ep,
			PREV.br_startoff + new->br_blockcount);
		trace_xfs_bmap_post_update(bma->ip, bma->idx - 1, state, _THIS_IP_);

		temp = PREV.br_blockcount - new->br_blockcount;
		trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
		xfs_bmbt_set_blockcount(ep, temp);
		if (bma->cur == NULL)
			rval = XFS_ILOG_DEXT;
		else {
			rval = 0;
			error = xfs_bmbt_lookup_eq(bma->cur, LEFT.br_startoff,
					LEFT.br_startblock, LEFT.br_blockcount,
					&i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
			error = xfs_bmbt_update(bma->cur, LEFT.br_startoff,
					LEFT.br_startblock,
					LEFT.br_blockcount +
					new->br_blockcount,
					LEFT.br_state);
			if (error)
				goto done;
		}
		da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
			startblockval(PREV.br_startblock));
		xfs_bmbt_set_startblock(ep, nullstartblock(da_new));
		trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);

		bma->idx--;
		break;

	case BMAP_LEFT_FILLING:
		/*
		 * Filling in the first part of a previous delayed allocation.
		 * The left neighbor is not contiguous.
		 */
		trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
		xfs_bmbt_set_startoff(ep, new_endoff);
		temp = PREV.br_blockcount - new->br_blockcount;
		xfs_bmbt_set_blockcount(ep, temp);
		xfs_iext_insert(bma->ip, bma->idx, 1, new, state);
		(*nextents)++;
		if (bma->cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
			error = xfs_bmbt_lookup_eq(bma->cur, new->br_startoff,
					new->br_startblock, new->br_blockcount,
					&i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
			bma->cur->bc_rec.b.br_state = XFS_EXT_NORM;
			error = xfs_btree_insert(bma->cur, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
		}

		if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
			error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
					bma->firstblock, bma->dfops,
					&bma->cur, 1, &tmp_rval, whichfork);
			rval |= tmp_rval;
			if (error)
				goto done;
		}
		da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
			startblockval(PREV.br_startblock) -
			(bma->cur ? bma->cur->bc_private.b.allocated : 0));
		ep = xfs_iext_get_ext(ifp, bma->idx + 1);
		xfs_bmbt_set_startblock(ep, nullstartblock(da_new));
		trace_xfs_bmap_post_update(bma->ip, bma->idx + 1, state, _THIS_IP_);
		break;
	case BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
		/*
		 * Filling in the last part of a previous delayed allocation.
		 * The right neighbor is contiguous with the new allocation.
		 */
		temp = PREV.br_blockcount - new->br_blockcount;
		trace_xfs_bmap_pre_update(bma->ip, bma->idx + 1, state, _THIS_IP_);
		xfs_bmbt_set_blockcount(ep, temp);
		xfs_bmbt_set_allf(xfs_iext_get_ext(ifp, bma->idx + 1),
			new->br_startoff, new->br_startblock,
			new->br_blockcount + RIGHT.br_blockcount,
			RIGHT.br_state);
		trace_xfs_bmap_post_update(bma->ip, bma->idx + 1, state, _THIS_IP_);
		if (bma->cur == NULL)
			rval = XFS_ILOG_DEXT;
		else {
			rval = 0;
			error = xfs_bmbt_lookup_eq(bma->cur, RIGHT.br_startoff,
					RIGHT.br_startblock,
					RIGHT.br_blockcount, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
			error = xfs_bmbt_update(bma->cur, new->br_startoff,
					new->br_startblock,
					new->br_blockcount +
					RIGHT.br_blockcount,
					RIGHT.br_state);
			if (error)
				goto done;
		}

		da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
			startblockval(PREV.br_startblock));
		trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
		xfs_bmbt_set_startblock(ep, nullstartblock(da_new));
		trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);

		bma->idx++;
		break;

	case BMAP_RIGHT_FILLING:
		/*
		 * Filling in the last part of a previous delayed allocation.
		 * The right neighbor is not contiguous.
		 */
		temp = PREV.br_blockcount - new->br_blockcount;
		trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
		xfs_bmbt_set_blockcount(ep, temp);
		xfs_iext_insert(bma->ip, bma->idx + 1, 1, new, state);
		(*nextents)++;
		if (bma->cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
			error = xfs_bmbt_lookup_eq(bma->cur, new->br_startoff,
					new->br_startblock, new->br_blockcount,
					&i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
			bma->cur->bc_rec.b.br_state = XFS_EXT_NORM;
			error = xfs_btree_insert(bma->cur, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
		}

		if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
			error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
				bma->firstblock, bma->dfops, &bma->cur, 1,
				&tmp_rval, whichfork);
			rval |= tmp_rval;
			if (error)
				goto done;
		}
		da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
			startblockval(PREV.br_startblock) -
			(bma->cur ? bma->cur->bc_private.b.allocated : 0));
		ep = xfs_iext_get_ext(ifp, bma->idx);
		xfs_bmbt_set_startblock(ep, nullstartblock(da_new));
		trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);

		bma->idx++;
		break;
2050 * Filling in the middle part of a previous delayed allocation.
2051 * Contiguity is impossible here.
2052 * This case is avoided almost all the time.
2054 * We start with a delayed allocation:
2056 * +ddddddddddddddddddddddddddddddddddddddddddddddddddddddd+
2059 * and we are allocating:
2060 * +rrrrrrrrrrrrrrrrr+
2063 * and we set it up for insertion as:
2064 * +ddddddddddddddddddd+rrrrrrrrrrrrrrrrr+ddddddddddddddddd+
2066 * PREV @ idx LEFT RIGHT
2067 * inserted at idx + 1
2069 temp = new->br_startoff - PREV.br_startoff;
2070 temp2 = PREV.br_startoff + PREV.br_blockcount - new_endoff;
2071 trace_xfs_bmap_pre_update(bma->ip, bma->idx, 0, _THIS_IP_);
2072 xfs_bmbt_set_blockcount(ep, temp); /* truncate PREV */
2074 RIGHT.br_state = PREV.br_state;
2075 RIGHT.br_startblock = nullstartblock(
2076 (int)xfs_bmap_worst_indlen(bma->ip, temp2));
2077 RIGHT.br_startoff = new_endoff;
2078 RIGHT.br_blockcount = temp2;
2079 /* insert LEFT (r[0]) and RIGHT (r[1]) at the same time */
2080 xfs_iext_insert(bma->ip, bma->idx + 1, 2, &LEFT, state);
2082 if (bma->cur == NULL)
2083 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
2085 rval = XFS_ILOG_CORE;
2086 error = xfs_bmbt_lookup_eq(bma->cur, new->br_startoff,
2087 new->br_startblock, new->br_blockcount,
2091 XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
2092 bma->cur->bc_rec.b.br_state = XFS_EXT_NORM;
2093 error = xfs_btree_insert(bma->cur, &i);
2096 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2099 if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
2100 error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
2101 bma->firstblock, bma->dfops, &bma->cur,
2102 1, &tmp_rval, whichfork);
2107 temp = xfs_bmap_worst_indlen(bma->ip, temp);
2108 temp2 = xfs_bmap_worst_indlen(bma->ip, temp2);
2109 diff = (int)(temp + temp2 - startblockval(PREV.br_startblock) -
2110 (bma->cur ? bma->cur->bc_private.b.allocated : 0));
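/*
 * Editorial note (hedged): splitting one delalloc record into two can
 * *increase* the total worst-case indirect reservation, because each
 * piece now needs its own worst-case bmbt space. diff is the shortfall
 * between the new combined reservation (temp + temp2) and what was
 * already reserved for PREV (plus any blocks the cursor allocated); a
 * positive diff has to be taken out of the free-block pool here.
 */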
2112 error = xfs_mod_fdblocks(bma->ip->i_mount,
2113 -((int64_t)diff), false);
2119 ep = xfs_iext_get_ext(ifp, bma->idx);
2120 xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));
2121 trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);
2122 trace_xfs_bmap_pre_update(bma->ip, bma->idx + 2, state, _THIS_IP_);
2123 xfs_bmbt_set_startblock(xfs_iext_get_ext(ifp, bma->idx + 2),
2124 nullstartblock((int)temp2));
2125 trace_xfs_bmap_post_update(bma->ip, bma->idx + 2, state, _THIS_IP_);
2128 da_new = temp + temp2;
2131 case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
2132 case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
2133 case BMAP_LEFT_FILLING | BMAP_RIGHT_CONTIG:
2134 case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
2135 case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
2136 case BMAP_LEFT_CONTIG:
2137 case BMAP_RIGHT_CONTIG:
2139 * These cases are all impossible.
2144 /* add reverse mapping */
2145 error = xfs_rmap_map_extent(mp, bma->dfops, bma->ip, whichfork, new);
2149 /* convert to a btree if necessary */
2150 if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
2151 int tmp_logflags; /* partial log flag return val */
2153 ASSERT(bma->cur == NULL);
2154 error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
2155 bma->firstblock, bma->dfops, &bma->cur,
2156 da_old > 0, &tmp_logflags, whichfork);
2157 bma->logflags |= tmp_logflags;
2162 /* adjust for changes in reserved delayed indirect blocks */
2163 if (da_old || da_new) {
2166 temp += bma->cur->bc_private.b.allocated;
2167 ASSERT(temp <= da_old);
2169 xfs_mod_fdblocks(bma->ip->i_mount,
2170 (int64_t)(da_old - temp), false);
2173 /* clear out the allocated field, done with it now in any case. */
2175 bma->cur->bc_private.b.allocated = 0;
2177 xfs_bmap_check_leaf_extents(bma->cur, bma->ip, whichfork);
2179 if (whichfork != XFS_COW_FORK)
2180 bma->logflags |= rval;
2188 * Convert an unwritten allocation to a real allocation or vice versa.
2190 STATIC int /* error */
2191 xfs_bmap_add_extent_unwritten_real(
2192 struct xfs_trans *tp,
2193 xfs_inode_t *ip, /* incore inode pointer */
2195 xfs_extnum_t *idx, /* extent number to update/insert */
2196 xfs_btree_cur_t **curp, /* if *curp is null, not a btree */
2197 xfs_bmbt_irec_t *new, /* new data to add to file extents */
2198 xfs_fsblock_t *first, /* pointer to firstblock variable */
2199 struct xfs_defer_ops *dfops, /* list of extents to be freed */
2200 int *logflagsp) /* inode logging flags */
2202 xfs_btree_cur_t *cur; /* btree cursor */
2203 xfs_bmbt_rec_host_t *ep; /* extent entry for idx */
2204 int error; /* error return value */
2205 int i; /* temp state */
2206 xfs_ifork_t *ifp; /* inode fork pointer */
2207 xfs_fileoff_t new_endoff; /* end offset of new entry */
2208 xfs_exntst_t newext; /* new extent state */
2209 xfs_exntst_t oldext; /* old extent state */
2210 xfs_bmbt_irec_t r[3]; /* neighbor extent entries */
2211 /* left is 0, right is 1, prev is 2 */
2212 int rval=0; /* return value (logging flags) */
2213 int state = 0;/* state bits, accessed thru macros */
2214 struct xfs_mount *mp = ip->i_mount;
2219 ifp = XFS_IFORK_PTR(ip, whichfork);
2220 if (whichfork == XFS_COW_FORK)
2221 state |= BMAP_COWFORK;
2224 ASSERT(*idx <= xfs_iext_count(ifp));
2225 ASSERT(!isnullstartblock(new->br_startblock));
2227 XFS_STATS_INC(mp, xs_add_exlist);
2234 * Set up a bunch of variables to make the tests simpler.
2237 ep = xfs_iext_get_ext(ifp, *idx);
2238 xfs_bmbt_get_all(ep, &PREV);
2239 newext = new->br_state;
2240 oldext = (newext == XFS_EXT_UNWRITTEN) ?
2241 XFS_EXT_NORM : XFS_EXT_UNWRITTEN;
2242 ASSERT(PREV.br_state == oldext);
2243 new_endoff = new->br_startoff + new->br_blockcount;
2244 ASSERT(PREV.br_startoff <= new->br_startoff);
2245 ASSERT(PREV.br_startoff + PREV.br_blockcount >= new_endoff);
2248 * Set flags determining what part of the previous oldext allocation
2249 * extent is being replaced by a newext allocation.
2251 if (PREV.br_startoff == new->br_startoff)
2252 state |= BMAP_LEFT_FILLING;
2253 if (PREV.br_startoff + PREV.br_blockcount == new_endoff)
2254 state |= BMAP_RIGHT_FILLING;
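/*
 * Editorial summary (not from the original source): the two FILLING
 * bits classify how the new extent overlays PREV:
 *
 *	LEFT_FILLING | RIGHT_FILLING	new replaces all of PREV
 *	LEFT_FILLING only		new covers the start of PREV
 *	RIGHT_FILLING only		new covers the end of PREV
 *	neither				new is interior to PREV
 *
 * Combined with the CONTIG bits computed below, these select the
 * switch case that does the actual extent-list surgery.
 */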
2257 * Check and set flags if this segment has a left neighbor.
2258 * Don't set contiguous if the combined extent would be too large.
2261 state |= BMAP_LEFT_VALID;
2262 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, *idx - 1), &LEFT);
2264 if (isnullstartblock(LEFT.br_startblock))
2265 state |= BMAP_LEFT_DELAY;
2268 if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) &&
2269 LEFT.br_startoff + LEFT.br_blockcount == new->br_startoff &&
2270 LEFT.br_startblock + LEFT.br_blockcount == new->br_startblock &&
2271 LEFT.br_state == newext &&
2272 LEFT.br_blockcount + new->br_blockcount <= MAXEXTLEN)
2273 state |= BMAP_LEFT_CONTIG;
2276 * Check and set flags if this segment has a right neighbor.
2277 * Don't set contiguous if the combined extent would be too large.
2278 * Also check for all-three-contiguous being too large.
2280 if (*idx < xfs_iext_count(ifp) - 1) {
2281 state |= BMAP_RIGHT_VALID;
2282 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, *idx + 1), &RIGHT);
2283 if (isnullstartblock(RIGHT.br_startblock))
2284 state |= BMAP_RIGHT_DELAY;
2287 if ((state & BMAP_RIGHT_VALID) && !(state & BMAP_RIGHT_DELAY) &&
2288 new_endoff == RIGHT.br_startoff &&
2289 new->br_startblock + new->br_blockcount == RIGHT.br_startblock &&
2290 newext == RIGHT.br_state &&
2291 new->br_blockcount + RIGHT.br_blockcount <= MAXEXTLEN &&
2292 ((state & (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
2293 BMAP_RIGHT_FILLING)) !=
2294 (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
2295 BMAP_RIGHT_FILLING) ||
2296 LEFT.br_blockcount + new->br_blockcount + RIGHT.br_blockcount
2297 <= MAXEXTLEN))
2298 state |= BMAP_RIGHT_CONTIG;
2301 * Switch out based on the FILLING and CONTIG state bits.
2303 switch (state & (BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
2304 BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG)) {
2305 case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
2306 BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
2308 * Setting all of a previous oldext extent to newext.
2309 * The left and right neighbors are both contiguous with new.
2313 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
2314 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, *idx),
2315 LEFT.br_blockcount + PREV.br_blockcount +
2316 RIGHT.br_blockcount);
2317 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
2319 xfs_iext_remove(ip, *idx + 1, 2, state);
2320 XFS_IFORK_NEXT_SET(ip, whichfork,
2321 XFS_IFORK_NEXTENTS(ip, whichfork) - 2);
2323 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
2325 rval = XFS_ILOG_CORE;
2326 if ((error = xfs_bmbt_lookup_eq(cur, RIGHT.br_startoff,
2327 RIGHT.br_startblock,
2328 RIGHT.br_blockcount, &i)))
2330 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2331 if ((error = xfs_btree_delete(cur, &i)))
2333 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2334 if ((error = xfs_btree_decrement(cur, 0, &i)))
2336 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2337 if ((error = xfs_btree_delete(cur, &i)))
2339 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2340 if ((error = xfs_btree_decrement(cur, 0, &i)))
2342 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2343 if ((error = xfs_bmbt_update(cur, LEFT.br_startoff,
2345 LEFT.br_blockcount + PREV.br_blockcount +
2346 RIGHT.br_blockcount, LEFT.br_state)))
2351 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
2353 * Setting all of a previous oldext extent to newext.
2354 * The left neighbor is contiguous, the right is not.
2358 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
2359 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, *idx),
2360 LEFT.br_blockcount + PREV.br_blockcount);
2361 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
2363 xfs_iext_remove(ip, *idx + 1, 1, state);
2364 XFS_IFORK_NEXT_SET(ip, whichfork,
2365 XFS_IFORK_NEXTENTS(ip, whichfork) - 1);
2367 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
2369 rval = XFS_ILOG_CORE;
2370 if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff,
2371 PREV.br_startblock, PREV.br_blockcount,
2374 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2375 if ((error = xfs_btree_delete(cur, &i)))
2377 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2378 if ((error = xfs_btree_decrement(cur, 0, &i)))
2380 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2381 if ((error = xfs_bmbt_update(cur, LEFT.br_startoff,
2383 LEFT.br_blockcount + PREV.br_blockcount,
2389 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
2391 * Setting all of a previous oldext extent to newext.
2392 * The right neighbor is contiguous, the left is not.
2394 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
2395 xfs_bmbt_set_blockcount(ep,
2396 PREV.br_blockcount + RIGHT.br_blockcount);
2397 xfs_bmbt_set_state(ep, newext);
2398 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
2399 xfs_iext_remove(ip, *idx + 1, 1, state);
2400 XFS_IFORK_NEXT_SET(ip, whichfork,
2401 XFS_IFORK_NEXTENTS(ip, whichfork) - 1);
2403 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
2405 rval = XFS_ILOG_CORE;
2406 if ((error = xfs_bmbt_lookup_eq(cur, RIGHT.br_startoff,
2407 RIGHT.br_startblock,
2408 RIGHT.br_blockcount, &i)))
2410 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2411 if ((error = xfs_btree_delete(cur, &i)))
2413 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2414 if ((error = xfs_btree_decrement(cur, 0, &i)))
2416 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2417 if ((error = xfs_bmbt_update(cur, new->br_startoff,
2419 new->br_blockcount + RIGHT.br_blockcount,
2425 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
2427 * Setting all of a previous oldext extent to newext.
2428 * Neither the left nor right neighbors are contiguous with
2431 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
2432 xfs_bmbt_set_state(ep, newext);
2433 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
2436 rval = XFS_ILOG_DEXT;
2439 if ((error = xfs_bmbt_lookup_eq(cur, new->br_startoff,
2440 new->br_startblock, new->br_blockcount,
2443 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2444 if ((error = xfs_bmbt_update(cur, new->br_startoff,
2445 new->br_startblock, new->br_blockcount,
2451 case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG:
2453 * Setting the first part of a previous oldext extent to newext.
2454 * The left neighbor is contiguous.
2456 trace_xfs_bmap_pre_update(ip, *idx - 1, state, _THIS_IP_);
2457 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, *idx - 1),
2458 LEFT.br_blockcount + new->br_blockcount);
2459 xfs_bmbt_set_startoff(ep,
2460 PREV.br_startoff + new->br_blockcount);
2461 trace_xfs_bmap_post_update(ip, *idx - 1, state, _THIS_IP_);
2463 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
2464 xfs_bmbt_set_startblock(ep,
2465 new->br_startblock + new->br_blockcount);
2466 xfs_bmbt_set_blockcount(ep,
2467 PREV.br_blockcount - new->br_blockcount);
2468 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
2473 rval = XFS_ILOG_DEXT;
2476 if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff,
2477 PREV.br_startblock, PREV.br_blockcount,
2480 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2481 if ((error = xfs_bmbt_update(cur,
2482 PREV.br_startoff + new->br_blockcount,
2483 PREV.br_startblock + new->br_blockcount,
2484 PREV.br_blockcount - new->br_blockcount,
2487 if ((error = xfs_btree_decrement(cur, 0, &i)))
2489 error = xfs_bmbt_update(cur, LEFT.br_startoff,
2491 LEFT.br_blockcount + new->br_blockcount,
2498 case BMAP_LEFT_FILLING:
2500 * Setting the first part of a previous oldext extent to newext.
2501 * The left neighbor is not contiguous.
2503 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
2504 ASSERT(ep && xfs_bmbt_get_state(ep) == oldext);
2505 xfs_bmbt_set_startoff(ep, new_endoff);
2506 xfs_bmbt_set_blockcount(ep,
2507 PREV.br_blockcount - new->br_blockcount);
2508 xfs_bmbt_set_startblock(ep,
2509 new->br_startblock + new->br_blockcount);
2510 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
2512 xfs_iext_insert(ip, *idx, 1, new, state);
2513 XFS_IFORK_NEXT_SET(ip, whichfork,
2514 XFS_IFORK_NEXTENTS(ip, whichfork) + 1);
2516 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
2518 rval = XFS_ILOG_CORE;
2519 if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff,
2520 PREV.br_startblock, PREV.br_blockcount,
2523 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2524 if ((error = xfs_bmbt_update(cur,
2525 PREV.br_startoff + new->br_blockcount,
2526 PREV.br_startblock + new->br_blockcount,
2527 PREV.br_blockcount - new->br_blockcount,
2530 cur->bc_rec.b = *new;
2531 if ((error = xfs_btree_insert(cur, &i)))
2533 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2537 case BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
2539 * Setting the last part of a previous oldext extent to newext.
2540 * The right neighbor is contiguous with the new allocation.
2542 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
2543 xfs_bmbt_set_blockcount(ep,
2544 PREV.br_blockcount - new->br_blockcount);
2545 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
2549 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
2550 xfs_bmbt_set_allf(xfs_iext_get_ext(ifp, *idx),
2551 new->br_startoff, new->br_startblock,
2552 new->br_blockcount + RIGHT.br_blockcount, newext);
2553 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
2556 rval = XFS_ILOG_DEXT;
2559 if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff,
2560 PREV.br_startblock,
2561 PREV.br_blockcount, &i)))
2563 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2564 if ((error = xfs_bmbt_update(cur, PREV.br_startoff,
2565 PREV.br_startblock,
2566 PREV.br_blockcount - new->br_blockcount,
2567 oldext)))
2568 goto done;
2569 if ((error = xfs_btree_increment(cur, 0, &i)))
2571 if ((error = xfs_bmbt_update(cur, new->br_startoff,
2573 new->br_blockcount + RIGHT.br_blockcount,
2579 case BMAP_RIGHT_FILLING:
2581 * Setting the last part of a previous oldext extent to newext.
2582 * The right neighbor is not contiguous.
2584 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
2585 xfs_bmbt_set_blockcount(ep,
2586 PREV.br_blockcount - new->br_blockcount);
2587 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
2590 xfs_iext_insert(ip, *idx, 1, new, state);
2592 XFS_IFORK_NEXT_SET(ip, whichfork,
2593 XFS_IFORK_NEXTENTS(ip, whichfork) + 1);
2595 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
2597 rval = XFS_ILOG_CORE;
2598 if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff,
2599 PREV.br_startblock, PREV.br_blockcount,
2602 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2603 if ((error = xfs_bmbt_update(cur, PREV.br_startoff,
2605 PREV.br_blockcount - new->br_blockcount,
2608 if ((error = xfs_bmbt_lookup_eq(cur, new->br_startoff,
2609 new->br_startblock, new->br_blockcount,
2612 XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
2613 cur->bc_rec.b.br_state = XFS_EXT_NORM;
2614 if ((error = xfs_btree_insert(cur, &i)))
2616 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2622 * Setting the middle part of a previous oldext extent to
2623 * newext. Contiguity is impossible here.
2624 * One extent becomes three extents.
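/*
 * Illustrative sketch (editorial addition, offsets invented): with
 * PREV covering [A, A+N) in state oldext and new covering [S, E)
 * strictly inside it, the result is:
 *
 *	[A, S)    oldext	the truncated in-place record (ep)
 *	[S, E)    newext	*new, inserted
 *	[E, A+N)  oldext	r[1], inserted
 */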
2626 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
2627 xfs_bmbt_set_blockcount(ep,
2628 new->br_startoff - PREV.br_startoff);
2629 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
2631 r[0] = *new;
2632 r[1].br_startoff = new_endoff;
2633 r[1].br_blockcount =
2634 PREV.br_startoff + PREV.br_blockcount - new_endoff;
2635 r[1].br_startblock = new->br_startblock + new->br_blockcount;
2636 r[1].br_state = oldext;
2638 ++*idx;
2639 xfs_iext_insert(ip, *idx, 2, &r[0], state);
2641 XFS_IFORK_NEXT_SET(ip, whichfork,
2642 XFS_IFORK_NEXTENTS(ip, whichfork) + 2);
2644 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
2646 rval = XFS_ILOG_CORE;
2647 if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff,
2648 PREV.br_startblock, PREV.br_blockcount,
2651 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2652 /* new right extent - oldext */
2653 if ((error = xfs_bmbt_update(cur, r[1].br_startoff,
2654 r[1].br_startblock, r[1].br_blockcount,
2657 /* new left extent - oldext */
2658 cur->bc_rec.b = PREV;
2659 cur->bc_rec.b.br_blockcount =
2660 new->br_startoff - PREV.br_startoff;
2661 if ((error = xfs_btree_insert(cur, &i)))
2663 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2665 * Reset the cursor to the position of the new extent
2666 * we are about to insert as we can't trust it after
2667 * the previous insert.
2669 if ((error = xfs_bmbt_lookup_eq(cur, new->br_startoff,
2670 new->br_startblock, new->br_blockcount,
2673 XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
2674 /* new middle extent - newext */
2675 cur->bc_rec.b.br_state = new->br_state;
2676 if ((error = xfs_btree_insert(cur, &i)))
2678 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2682 case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
2683 case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
2684 case BMAP_LEFT_FILLING | BMAP_RIGHT_CONTIG:
2685 case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
2686 case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
2687 case BMAP_LEFT_CONTIG:
2688 case BMAP_RIGHT_CONTIG:
2690 * These cases are all impossible.
2695 /* update reverse mappings */
2696 error = xfs_rmap_convert_extent(mp, dfops, ip, whichfork, new);
2700 /* convert to a btree if necessary */
2701 if (xfs_bmap_needs_btree(ip, whichfork)) {
2702 int tmp_logflags; /* partial log flag return val */
2704 ASSERT(cur == NULL);
2705 error = xfs_bmap_extents_to_btree(tp, ip, first, dfops, &cur,
2706 0, &tmp_logflags, whichfork);
2707 *logflagsp |= tmp_logflags;
2712 /* clear out the allocated field, done with it now in any case. */
2714 cur->bc_private.b.allocated = 0;
2718 xfs_bmap_check_leaf_extents(*curp, ip, whichfork);
2728 * Convert a hole to a delayed allocation.
2731 xfs_bmap_add_extent_hole_delay(
2732 xfs_inode_t *ip, /* incore inode pointer */
2734 xfs_extnum_t *idx, /* extent number to update/insert */
2735 xfs_bmbt_irec_t *new) /* new data to add to file extents */
2737 xfs_ifork_t *ifp; /* inode fork pointer */
2738 xfs_bmbt_irec_t left; /* left neighbor extent entry */
2739 xfs_filblks_t newlen=0; /* new indirect size */
2740 xfs_filblks_t oldlen=0; /* old indirect size */
2741 xfs_bmbt_irec_t right; /* right neighbor extent entry */
2742 int state; /* state bits, accessed thru macros */
2743 xfs_filblks_t temp=0; /* temp for indirect calculations */
2745 ifp = XFS_IFORK_PTR(ip, whichfork);
2747 if (whichfork == XFS_COW_FORK)
2748 state |= BMAP_COWFORK;
2749 ASSERT(isnullstartblock(new->br_startblock));
2752 * Check and set flags if this segment has a left neighbor
2755 state |= BMAP_LEFT_VALID;
2756 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, *idx - 1), &left);
2758 if (isnullstartblock(left.br_startblock))
2759 state |= BMAP_LEFT_DELAY;
2763 * Check and set flags if the current (right) segment exists.
2764 * If it doesn't exist, we're converting the hole at end-of-file.
2766 if (*idx < xfs_iext_count(ifp)) {
2767 state |= BMAP_RIGHT_VALID;
2768 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, *idx), &right);
2770 if (isnullstartblock(right.br_startblock))
2771 state |= BMAP_RIGHT_DELAY;
2775 * Set contiguity flags on the left and right neighbors.
2776 * Don't let extents get too large, even if the pieces are contiguous.
2778 if ((state & BMAP_LEFT_VALID) && (state & BMAP_LEFT_DELAY) &&
2779 left.br_startoff + left.br_blockcount == new->br_startoff &&
2780 left.br_blockcount + new->br_blockcount <= MAXEXTLEN)
2781 state |= BMAP_LEFT_CONTIG;
2783 if ((state & BMAP_RIGHT_VALID) && (state & BMAP_RIGHT_DELAY) &&
2784 new->br_startoff + new->br_blockcount == right.br_startoff &&
2785 new->br_blockcount + right.br_blockcount <= MAXEXTLEN &&
2786 (!(state & BMAP_LEFT_CONTIG) ||
2787 (left.br_blockcount + new->br_blockcount +
2788 right.br_blockcount <= MAXEXTLEN)))
2789 state |= BMAP_RIGHT_CONTIG;
2792 * Switch out based on the contiguity flags.
2794 switch (state & (BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG)) {
2795 case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
2797 * New allocation is contiguous with delayed allocations
2798 * on the left and on the right.
2799 * Merge all three into a single extent record.
2802 temp = left.br_blockcount + new->br_blockcount +
2803 right.br_blockcount;
2805 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
2806 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, *idx), temp);
2807 oldlen = startblockval(left.br_startblock) +
2808 startblockval(new->br_startblock) +
2809 startblockval(right.br_startblock);
2810 newlen = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
2811 oldlen);
2812 xfs_bmbt_set_startblock(xfs_iext_get_ext(ifp, *idx),
2813 nullstartblock((int)newlen));
2814 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
2816 xfs_iext_remove(ip, *idx + 1, 1, state);
2819 case BMAP_LEFT_CONTIG:
2821 * New allocation is contiguous with a delayed allocation
2823 * Merge the new allocation with the left neighbor.
2826 temp = left.br_blockcount + new->br_blockcount;
2828 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
2829 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, *idx), temp);
2830 oldlen = startblockval(left.br_startblock) +
2831 startblockval(new->br_startblock);
2832 newlen = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
2833 oldlen);
2834 xfs_bmbt_set_startblock(xfs_iext_get_ext(ifp, *idx),
2835 nullstartblock((int)newlen));
2836 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
2839 case BMAP_RIGHT_CONTIG:
2841 * New allocation is contiguous with a delayed allocation
2843 * Merge the new allocation with the right neighbor.
2845 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
2846 temp = new->br_blockcount + right.br_blockcount;
2847 oldlen = startblockval(new->br_startblock) +
2848 startblockval(right.br_startblock);
2849 newlen = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
2850 oldlen);
2851 xfs_bmbt_set_allf(xfs_iext_get_ext(ifp, *idx),
2852 new->br_startoff,
2853 nullstartblock((int)newlen), temp, right.br_state);
2854 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
2859 * New allocation is not contiguous with another
2860 * delayed allocation.
2861 * Insert a new entry.
2863 oldlen = newlen = 0;
2864 xfs_iext_insert(ip, *idx, 1, new, state);
2867 if (oldlen != newlen) {
2868 ASSERT(oldlen > newlen);
2869 xfs_mod_fdblocks(ip->i_mount, (int64_t)(oldlen - newlen),
2872 * Nothing to do for disk quota accounting here.
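/*
 * Editorial note (hedged): merging delayed extents can only shrink the
 * worst-case indirect reservation, since one merged extent needs at
 * most as much bmbt space as its pieces did separately; hence the
 * ASSERT(oldlen > newlen) above, with the surplus (oldlen - newlen)
 * returned to the free-block pool. For example, two 8-block delalloc
 * pieces each reserving one indirect block can merge into a 16-block
 * piece that still needs only one, freeing the other.
 */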
2878 * Convert a hole to a real allocation.
2880 STATIC int /* error */
2881 xfs_bmap_add_extent_hole_real(
2882 struct xfs_bmalloca *bma,
2885 struct xfs_bmbt_irec *new = &bma->got;
2886 int error; /* error return value */
2887 int i; /* temp state */
2888 xfs_ifork_t *ifp; /* inode fork pointer */
2889 xfs_bmbt_irec_t left; /* left neighbor extent entry */
2890 xfs_bmbt_irec_t right; /* right neighbor extent entry */
2891 int rval=0; /* return value (logging flags) */
2892 int state; /* state bits, accessed thru macros */
2893 struct xfs_mount *mp;
2895 mp = bma->ip->i_mount;
2896 ifp = XFS_IFORK_PTR(bma->ip, whichfork);
2898 ASSERT(bma->idx >= 0);
2899 ASSERT(bma->idx <= xfs_iext_count(ifp));
2900 ASSERT(!isnullstartblock(new->br_startblock));
2902 !(bma->cur->bc_private.b.flags & XFS_BTCUR_BPRV_WASDEL));
2904 XFS_STATS_INC(mp, xs_add_exlist);
2907 if (whichfork == XFS_ATTR_FORK)
2908 state |= BMAP_ATTRFORK;
2909 if (whichfork == XFS_COW_FORK)
2910 state |= BMAP_COWFORK;
2913 * Check and set flags if this segment has a left neighbor.
2916 state |= BMAP_LEFT_VALID;
2917 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, bma->idx - 1), &left);
2918 if (isnullstartblock(left.br_startblock))
2919 state |= BMAP_LEFT_DELAY;
2923 * Check and set flags if this segment has a current value.
2924 * Not true if we're inserting into the "hole" at eof.
2926 if (bma->idx < xfs_iext_count(ifp)) {
2927 state |= BMAP_RIGHT_VALID;
2928 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, bma->idx), &right);
2929 if (isnullstartblock(right.br_startblock))
2930 state |= BMAP_RIGHT_DELAY;
2934 * We're inserting a real allocation between "left" and "right".
2935 * Set the contiguity flags. Don't let extents get too large.
2937 if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) &&
2938 left.br_startoff + left.br_blockcount == new->br_startoff &&
2939 left.br_startblock + left.br_blockcount == new->br_startblock &&
2940 left.br_state == new->br_state &&
2941 left.br_blockcount + new->br_blockcount <= MAXEXTLEN)
2942 state |= BMAP_LEFT_CONTIG;
2944 if ((state & BMAP_RIGHT_VALID) && !(state & BMAP_RIGHT_DELAY) &&
2945 new->br_startoff + new->br_blockcount == right.br_startoff &&
2946 new->br_startblock + new->br_blockcount == right.br_startblock &&
2947 new->br_state == right.br_state &&
2948 new->br_blockcount + right.br_blockcount <= MAXEXTLEN &&
2949 (!(state & BMAP_LEFT_CONTIG) ||
2950 left.br_blockcount + new->br_blockcount +
2951 right.br_blockcount <= MAXEXTLEN))
2952 state |= BMAP_RIGHT_CONTIG;
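/*
 * Editorial summary (not from the original source): the four merge
 * cases below have these effects on the in-core extent count:
 *
 *	left and right contiguous	3 records become 1, nextents - 1
 *	one side contiguous		merged in place, count unchanged
 *	neither contiguous		new record inserted, nextents + 1
 */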
2956 * Select which case we're in here, and implement it.
2958 switch (state & (BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG)) {
2959 case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
2961 * New allocation is contiguous with real allocations on the
2962 * left and on the right.
2963 * Merge all three into a single extent record.
2966 trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
2967 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, bma->idx),
2968 left.br_blockcount + new->br_blockcount +
2969 right.br_blockcount);
2970 trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);
2972 xfs_iext_remove(bma->ip, bma->idx + 1, 1, state);
2974 XFS_IFORK_NEXT_SET(bma->ip, whichfork,
2975 XFS_IFORK_NEXTENTS(bma->ip, whichfork) - 1);
2976 if (bma->cur == NULL) {
2977 rval = XFS_ILOG_CORE | xfs_ilog_fext(whichfork);
2979 rval = XFS_ILOG_CORE;
2980 error = xfs_bmbt_lookup_eq(bma->cur, right.br_startoff,
2981 right.br_startblock, right.br_blockcount,
2985 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2986 error = xfs_btree_delete(bma->cur, &i);
2989 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2990 error = xfs_btree_decrement(bma->cur, 0, &i);
2993 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2994 error = xfs_bmbt_update(bma->cur, left.br_startoff,
2996 left.br_blockcount +
2997 new->br_blockcount +
2998 right.br_blockcount,
3005 case BMAP_LEFT_CONTIG:
3007 * New allocation is contiguous with a real allocation
3009 * Merge the new allocation with the left neighbor.
3012 trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
3013 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, bma->idx),
3014 left.br_blockcount + new->br_blockcount);
3015 trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);
3017 if (bma->cur == NULL) {
3018 rval = xfs_ilog_fext(whichfork);
3021 error = xfs_bmbt_lookup_eq(bma->cur, left.br_startoff,
3022 left.br_startblock, left.br_blockcount,
3026 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
3027 error = xfs_bmbt_update(bma->cur, left.br_startoff,
3029 left.br_blockcount +
3037 case BMAP_RIGHT_CONTIG:
3039 * New allocation is contiguous with a real allocation
3041 * Merge the new allocation with the right neighbor.
3043 trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
3044 xfs_bmbt_set_allf(xfs_iext_get_ext(ifp, bma->idx),
3045 new->br_startoff, new->br_startblock,
3046 new->br_blockcount + right.br_blockcount,
3048 trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);
3050 if (bma->cur == NULL) {
3051 rval = xfs_ilog_fext(whichfork);
3054 error = xfs_bmbt_lookup_eq(bma->cur,
3056 right.br_startblock,
3057 right.br_blockcount, &i);
3060 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
3061 error = xfs_bmbt_update(bma->cur, new->br_startoff,
3063 new->br_blockcount +
3064 right.br_blockcount,
3073 * New allocation is not contiguous with another
3075 * Insert a new entry.
3077 xfs_iext_insert(bma->ip, bma->idx, 1, new, state);
3078 XFS_IFORK_NEXT_SET(bma->ip, whichfork,
3079 XFS_IFORK_NEXTENTS(bma->ip, whichfork) + 1);
3080 if (bma->cur == NULL) {
3081 rval = XFS_ILOG_CORE | xfs_ilog_fext(whichfork);
3083 rval = XFS_ILOG_CORE;
3084 error = xfs_bmbt_lookup_eq(bma->cur,
3087 new->br_blockcount, &i);
3090 XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
3091 bma->cur->bc_rec.b.br_state = new->br_state;
3092 error = xfs_btree_insert(bma->cur, &i);
3095 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
3100 /* add reverse mapping */
3101 error = xfs_rmap_map_extent(mp, bma->dfops, bma->ip, whichfork, new);
3105 /* convert to a btree if necessary */
3106 if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
3107 int tmp_logflags; /* partial log flag return val */
3109 ASSERT(bma->cur == NULL);
3110 error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
3111 bma->firstblock, bma->dfops, &bma->cur,
3112 0, &tmp_logflags, whichfork);
3113 bma->logflags |= tmp_logflags;
3118 /* clear out the allocated field, done with it now in any case. */
3120 bma->cur->bc_private.b.allocated = 0;
3122 xfs_bmap_check_leaf_extents(bma->cur, bma->ip, whichfork);
3124 bma->logflags |= rval;
3129 * Functions used in the extent read, allocate and remove paths
3133 * Adjust the size of the new extent based on di_extsize and rt extsize.
3136 xfs_bmap_extsize_align(
3138 xfs_bmbt_irec_t *gotp, /* next extent pointer */
3139 xfs_bmbt_irec_t *prevp, /* previous extent pointer */
3140 xfs_extlen_t extsz, /* align to this extent size */
3141 int rt, /* is this a realtime inode? */
3142 int eof, /* is extent at end-of-file? */
3143 int delay, /* creating delalloc extent? */
3144 int convert, /* overwriting unwritten extent? */
3145 xfs_fileoff_t *offp, /* in/out: aligned offset */
3146 xfs_extlen_t *lenp) /* in/out: aligned length */
3148 xfs_fileoff_t orig_off; /* original offset */
3149 xfs_extlen_t orig_alen; /* original length */
3150 xfs_fileoff_t orig_end; /* original off+len */
3151 xfs_fileoff_t nexto; /* next file offset */
3152 xfs_fileoff_t prevo; /* previous file offset */
3153 xfs_fileoff_t align_off; /* temp for offset */
3154 xfs_extlen_t align_alen; /* temp for length */
3155 xfs_extlen_t temp; /* temp for calculations */
3160 orig_off = align_off = *offp;
3161 orig_alen = align_alen = *lenp;
3162 orig_end = orig_off + orig_alen;
3165 * If this request overlaps an existing extent, then don't
3166 * attempt to perform any additional alignment.
3168 if (!delay && !eof &&
3169 (orig_off >= gotp->br_startoff) &&
3170 (orig_end <= gotp->br_startoff + gotp->br_blockcount)) {
3175 * If the file offset is unaligned vs. the extent size
3176 * we need to align it. This will be possible unless
3177 * the file was previously written with a kernel that didn't
3178 * perform this alignment, or if a truncate shot us in the
3179 * foot.
3181 temp = do_mod(orig_off, extsz);
3182 if (temp) {
3183 align_alen += temp;
3184 align_off -= temp;
3185 }
3187 /* Same adjustment for the end of the requested area. */
3188 temp = (align_alen % extsz);
3189 if (temp)
3190 align_alen += extsz - temp;
3193 * For large extent hint sizes, the aligned extent might be larger than
3194 * MAXEXTLEN. In that case, reduce the size by an extsz so that it pulls
3195 * the length back under MAXEXTLEN. The outer allocation loops handle
3196 * short allocation just fine, so it is safe to do this. We only want to
3197 * do it when we are forced to, though, because it means more allocation
3198 * operations are required.
3200 while (align_alen > MAXEXTLEN)
3201 align_alen -= extsz;
3202 ASSERT(align_alen <= MAXEXTLEN);
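/*
 * Worked example (editorial addition, hedged; MAXEXTLEN is the 21-bit
 * on-disk extent length limit, 0x1fffff = 2097151 blocks): with a 1 MB
 * extent size hint on 4k blocks (extsz = 256) and align_alen = 2097280,
 * one pass of the loop gives 2097280 - 256 = 2097024 <= 2097151, still
 * extsz-aligned at both ends, just one hint-unit shorter.
 */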
3205 * If the previous block overlaps with this proposed allocation
3206 * then move the start forward without adjusting the length.
3208 if (prevp->br_startoff != NULLFILEOFF) {
3209 if (prevp->br_startblock == HOLESTARTBLOCK)
3210 prevo = prevp->br_startoff;
3212 prevo = prevp->br_startoff + prevp->br_blockcount;
3215 if (align_off != orig_off && align_off < prevo)
3218 * If the next block overlaps with this proposed allocation
3219 * then move the start back without adjusting the length,
3220 * but not before offset 0.
3221 * This may of course make the start overlap previous block,
3222 * and if we hit the offset 0 limit then the next block
3223 * can still overlap too.
3225 if (!eof && gotp->br_startoff != NULLFILEOFF) {
3226 if ((delay && gotp->br_startblock == HOLESTARTBLOCK) ||
3227 (!delay && gotp->br_startblock == DELAYSTARTBLOCK))
3228 nexto = gotp->br_startoff + gotp->br_blockcount;
3230 nexto = gotp->br_startoff;
3232 nexto = NULLFILEOFF;
3234 align_off + align_alen != orig_end &&
3235 align_off + align_alen > nexto)
3236 align_off = nexto > align_alen ? nexto - align_alen : 0;
3238 * If we're now overlapping the next or previous extent that
3239 * means we can't fit an extsz piece in this hole. Just move
3240 * the start forward to the first valid spot and set
3241 * the length so we hit the end.
3243 if (align_off != orig_off && align_off < prevo)
3245 if (align_off + align_alen != orig_end &&
3246 align_off + align_alen > nexto &&
3247 nexto != NULLFILEOFF) {
3248 ASSERT(nexto > prevo);
3249 align_alen = nexto - align_off;
3253 * If realtime, and the result isn't a multiple of the realtime
3254 * extent size we need to remove blocks until it is.
3256 if (rt && (temp = (align_alen % mp->m_sb.sb_rextsize))) {
3258 * We're not covering the original request, or
3259 * we won't be able to once we fix the length.
3261 if (orig_off < align_off ||
3262 orig_end > align_off + align_alen ||
3263 align_alen - temp < orig_alen)
3266 * Try to fix it by moving the start up.
3268 if (align_off + temp <= orig_off) {
3273 * Try to fix it by moving the end in.
3275 else if (align_off + align_alen - temp >= orig_end)
3278 * Set the start to the minimum then trim the length.
3281 align_alen -= orig_off - align_off;
3282 align_off = orig_off;
3283 align_alen -= align_alen % mp->m_sb.sb_rextsize;
3286 * Result doesn't cover the request, fail it.
3288 if (orig_off < align_off || orig_end > align_off + align_alen)
3291 ASSERT(orig_off >= align_off);
3292 /* see MAXEXTLEN handling above */
3293 ASSERT(orig_end <= align_off + align_alen ||
3294 align_alen + extsz > MAXEXTLEN);
3298 if (!eof && gotp->br_startoff != NULLFILEOFF)
3299 ASSERT(align_off + align_alen <= gotp->br_startoff);
3300 if (prevp->br_startoff != NULLFILEOFF)
3301 ASSERT(align_off >= prevp->br_startoff + prevp->br_blockcount);
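/*
 * Illustrative example (editorial addition, values invented): a
 * request for offset 100, length 10 against an extsz hint of 16 comes
 * back from the function above as offset 96, length 16 - rounded down
 * at the front and up at the back so the allocation covers whole extsz
 * units while still containing the original [100, 110) range, assuming
 * no neighbouring extents force further adjustment.
 */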
3309 #define XFS_ALLOC_GAP_UNITS 4
3313 struct xfs_bmalloca *ap) /* bmap alloc argument struct */
3315 xfs_fsblock_t adjust; /* adjustment to block numbers */
3316 xfs_agnumber_t fb_agno; /* ag number of ap->firstblock */
3317 xfs_mount_t *mp; /* mount point structure */
3318 int nullfb; /* true if ap->firstblock isn't set */
3319 int rt; /* true if inode is realtime */
3321 #define ISVALID(x,y) \
3323 (x) < mp->m_sb.sb_rblocks : \
3324 XFS_FSB_TO_AGNO(mp, x) == XFS_FSB_TO_AGNO(mp, y) && \
3325 XFS_FSB_TO_AGNO(mp, x) < mp->m_sb.sb_agcount && \
3326 XFS_FSB_TO_AGBNO(mp, x) < mp->m_sb.sb_agblocks)
3328 mp = ap->ip->i_mount;
3329 nullfb = *ap->firstblock == NULLFSBLOCK;
3330 rt = XFS_IS_REALTIME_INODE(ap->ip) &&
3331 xfs_alloc_is_userdata(ap->datatype);
3332 fb_agno = nullfb ? NULLAGNUMBER : XFS_FSB_TO_AGNO(mp, *ap->firstblock);
3334 * If allocating at eof, and there's a previous real block,
3335 * try to use its last block as our starting point.
3337 if (ap->eof && ap->prev.br_startoff != NULLFILEOFF &&
3338 !isnullstartblock(ap->prev.br_startblock) &&
3339 ISVALID(ap->prev.br_startblock + ap->prev.br_blockcount,
3340 ap->prev.br_startblock)) {
3341 ap->blkno = ap->prev.br_startblock + ap->prev.br_blockcount;
3343 * Adjust for the gap between prevp and us.
3345 adjust = ap->offset -
3346 (ap->prev.br_startoff + ap->prev.br_blockcount);
3348 ISVALID(ap->blkno + adjust, ap->prev.br_startblock))
3349 ap->blkno += adjust;
3352 * If not at eof, then compare the two neighbor blocks.
3353 * Figure out whether either one gives us a good starting point,
3354 * and pick the better one.
3356 else if (!ap->eof) {
3357 xfs_fsblock_t gotbno; /* right side block number */
3358 xfs_fsblock_t gotdiff=0; /* right side difference */
3359 xfs_fsblock_t prevbno; /* left side block number */
3360 xfs_fsblock_t prevdiff=0; /* left side difference */
3363 * If there's a previous (left) block, select a requested
3364 * start block based on it.
3366 if (ap->prev.br_startoff != NULLFILEOFF &&
3367 !isnullstartblock(ap->prev.br_startblock) &&
3368 (prevbno = ap->prev.br_startblock +
3369 ap->prev.br_blockcount) &&
3370 ISVALID(prevbno, ap->prev.br_startblock)) {
3372 * Calculate gap to end of previous block.
3374 adjust = prevdiff = ap->offset -
3375 (ap->prev.br_startoff +
3376 ap->prev.br_blockcount);
3378 * Figure the startblock based on the previous block's
3379 * end and the gap size.
3381 * If the gap is large relative to the piece we're
3382 * allocating, or using it gives us an invalid block
3383 * number, then just use the end of the previous block.
3385 if (prevdiff <= XFS_ALLOC_GAP_UNITS * ap->length &&
3386 ISVALID(prevbno + prevdiff,
3387 ap->prev.br_startblock))
3392 * If the firstblock forbids it, can't use it,
3395 if (!rt && !nullfb &&
3396 XFS_FSB_TO_AGNO(mp, prevbno) != fb_agno)
3397 prevbno = NULLFSBLOCK;
3400 * No previous block or can't follow it, just default.
3403 prevbno = NULLFSBLOCK;
3405 * If there's a following (right) block, select a requested
3406 * start block based on it.
3408 if (!isnullstartblock(ap->got.br_startblock)) {
3410 * Calculate gap to start of next block.
3412 adjust = gotdiff = ap->got.br_startoff - ap->offset;
3414 * Figure the startblock based on the next block's
3415 * start and the gap size.
3417 gotbno = ap->got.br_startblock;
3420 * If the gap is large relative to the piece we're
3421 * allocating, or using it gives us an invalid block
3422 * number, then just use the start of the next block
3423 * offset by our length.
3425 if (gotdiff <= XFS_ALLOC_GAP_UNITS * ap->length &&
3426 ISVALID(gotbno - gotdiff, gotbno))
3428 else if (ISVALID(gotbno - ap->length, gotbno)) {
3429 gotbno -= ap->length;
3430 gotdiff += adjust - ap->length;
3434 * If the firstblock forbids it, can't use it,
3437 if (!rt && !nullfb &&
3438 XFS_FSB_TO_AGNO(mp, gotbno) != fb_agno)
3439 gotbno = NULLFSBLOCK;
3442 * No next block, just default.
3445 gotbno = NULLFSBLOCK;
3447 * If both valid, pick the better one, else the only good
3448 * one, else ap->blkno is already set (to 0 or the inode block).
3450 if (prevbno != NULLFSBLOCK && gotbno != NULLFSBLOCK)
3451 ap->blkno = prevdiff <= gotdiff ? prevbno : gotbno;
3452 else if (prevbno != NULLFSBLOCK)
3453 ap->blkno = prevbno;
3454 else if (gotbno != NULLFSBLOCK)
3455 ap->blkno = gotbno;
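/*
 * Editorial note (hedged): taken together, the above is a "nearest
 * usable neighbour" heuristic. Each side proposes a disk block plus
 * the distance penalty of its file-offset gap; gaps larger than
 * XFS_ALLOC_GAP_UNITS * ap->length are no longer projected across,
 * and whichever side has the smaller penalty supplies ap->blkno.
 */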
3461 xfs_bmap_longest_free_extent(
3462 struct xfs_trans *tp,
3467 struct xfs_mount *mp = tp->t_mountp;
3468 struct xfs_perag *pag;
3469 xfs_extlen_t longest;
3472 pag = xfs_perag_get(mp, ag);
3473 if (!pag->pagf_init) {
3474 error = xfs_alloc_pagf_init(mp, tp, ag, XFS_ALLOC_FLAG_TRYLOCK);
3478 if (!pag->pagf_init) {
3484 longest = xfs_alloc_longest_free_extent(mp, pag,
3485 xfs_alloc_min_freelist(mp, pag),
3486 xfs_ag_resv_needed(pag, XFS_AG_RESV_NONE));
3487 if (*blen < longest)
3496 xfs_bmap_select_minlen(
3497 struct xfs_bmalloca *ap,
3498 struct xfs_alloc_arg *args,
3502 if (notinit || *blen < ap->minlen) {
3504 * Since we did a BUF_TRYLOCK above, it is possible that
3505 * there is space for this request.
3507 args->minlen = ap->minlen;
3508 } else if (*blen < args->maxlen) {
3510 * If the best seen length is less than the request length,
3511 * use the best as the minimum.
3513 args->minlen = *blen;
3516 * Otherwise we've seen an extent as big as maxlen, use that
3519 args->minlen = args->maxlen;
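/*
 * Worked example (editorial addition, values invented): with
 * ap->minlen = 1 and args->maxlen = 64, a best-seen free extent of
 * *blen = 40 gives args->minlen = 40; *blen = 100 would give
 * args->minlen = 64; and notinit (an AGF we could not lock) falls
 * back to args->minlen = 1, so an allocation is never refused on
 * incomplete information.
 */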
3524 xfs_bmap_btalloc_nullfb(
3525 struct xfs_bmalloca *ap,
3526 struct xfs_alloc_arg *args,
3529 struct xfs_mount *mp = ap->ip->i_mount;
3530 xfs_agnumber_t ag, startag;
3534 args->type = XFS_ALLOCTYPE_START_BNO;
3535 args->total = ap->total;
3537 startag = ag = XFS_FSB_TO_AGNO(mp, args->fsbno);
3538 if (startag == NULLAGNUMBER)
3541 while (*blen < args->maxlen) {
3542 error = xfs_bmap_longest_free_extent(args->tp, ag, blen,
3547 if (++ag == mp->m_sb.sb_agcount)
3553 xfs_bmap_select_minlen(ap, args, blen, notinit);
3558 xfs_bmap_btalloc_filestreams(
3559 struct xfs_bmalloca *ap,
3560 struct xfs_alloc_arg *args,
3563 struct xfs_mount *mp = ap->ip->i_mount;
3568 args->type = XFS_ALLOCTYPE_NEAR_BNO;
3569 args->total = ap->total;
3571 ag = XFS_FSB_TO_AGNO(mp, args->fsbno);
3572 if (ag == NULLAGNUMBER)
3575 error = xfs_bmap_longest_free_extent(args->tp, ag, blen, ¬init);
3579 if (*blen < args->maxlen) {
3580 error = xfs_filestream_new_ag(ap, &ag);
3584 error = xfs_bmap_longest_free_extent(args->tp, ag, blen,
3591 xfs_bmap_select_minlen(ap, args, blen, notinit);
3594 * Set the failure fallback case to look in the selected AG as stream
3597 ap->blkno = args->fsbno = XFS_AGB_TO_FSB(mp, ag, 0);
3603 struct xfs_bmalloca *ap) /* bmap alloc argument struct */
3605 xfs_mount_t *mp; /* mount point structure */
3606 xfs_alloctype_t atype = 0; /* type for allocation routines */
3607 xfs_extlen_t align = 0; /* minimum allocation alignment */
3608 xfs_agnumber_t fb_agno; /* ag number of ap->firstblock */
3610 xfs_alloc_arg_t args;
3612 xfs_extlen_t nextminlen = 0;
3613 int nullfb; /* true if ap->firstblock isn't set */
3621 mp = ap->ip->i_mount;
3623 /* stripe alignment for allocation is determined by mount parameters */
3625 if (mp->m_swidth && (mp->m_flags & XFS_MOUNT_SWALLOC))
3626 stripe_align = mp->m_swidth;
3627 else if (mp->m_dalign)
3628 stripe_align = mp->m_dalign;
3630 if (ap->flags & XFS_BMAPI_COWFORK)
3631 align = xfs_get_cowextsz_hint(ap->ip);
3632 else if (xfs_alloc_is_userdata(ap->datatype))
3633 align = xfs_get_extsz_hint(ap->ip);
3635 error = xfs_bmap_extsize_align(mp, &ap->got, &ap->prev,
3636 align, 0, ap->eof, 0, ap->conv,
3637 &ap->offset, &ap->length);
3643 nullfb = *ap->firstblock == NULLFSBLOCK;
3644 fb_agno = nullfb ? NULLAGNUMBER : XFS_FSB_TO_AGNO(mp, *ap->firstblock);
3646 if (xfs_alloc_is_userdata(ap->datatype) &&
3647 xfs_inode_is_filestream(ap->ip)) {
3648 ag = xfs_filestream_lookup_ag(ap->ip);
3649 ag = (ag != NULLAGNUMBER) ? ag : 0;
3650 ap->blkno = XFS_AGB_TO_FSB(mp, ag, 0);
3652 ap->blkno = XFS_INO_TO_FSB(mp, ap->ip->i_ino);
3655 ap->blkno = *ap->firstblock;
3657 xfs_bmap_adjacent(ap);
3660 * If allowed, use ap->blkno; otherwise must use firstblock since
3661 * it's in the right allocation group.
3663 if (nullfb || XFS_FSB_TO_AGNO(mp, ap->blkno) == fb_agno)
3666 ap->blkno = *ap->firstblock;
3668 * Normal allocation, done through xfs_alloc_vextent.
3670 tryagain = isaligned = 0;
3671 memset(&args, 0, sizeof(args));
3674 args.fsbno = ap->blkno;
3675 xfs_rmap_skip_owner_update(&args.oinfo);
3677 /* Trim the allocation back to the maximum an AG can fit. */
3678 args.maxlen = MIN(ap->length, mp->m_ag_max_usable);
3679 args.firstblock = *ap->firstblock;
3683 * Search for an allocation group with a single extent large
3684 * enough for the request. If one isn't found, then adjust
3685 * the minimum allocation size to the largest space found.
3687 if (xfs_alloc_is_userdata(ap->datatype) &&
3688 xfs_inode_is_filestream(ap->ip))
3689 error = xfs_bmap_btalloc_filestreams(ap, &args, &blen);
3691 error = xfs_bmap_btalloc_nullfb(ap, &args, &blen);
3694 } else if (ap->dfops->dop_low) {
3695 if (xfs_inode_is_filestream(ap->ip))
3696 args.type = XFS_ALLOCTYPE_FIRST_AG;
3698 args.type = XFS_ALLOCTYPE_START_BNO;
3699 args.total = args.minlen = ap->minlen;
3701 args.type = XFS_ALLOCTYPE_NEAR_BNO;
3702 args.total = ap->total;
3703 args.minlen = ap->minlen;
3705 /* apply extent size hints if obtained earlier */
3706 if (align) {
3707 args.prod = align;
3708 if ((args.mod = (xfs_extlen_t)do_mod(ap->offset, args.prod)))
3709 args.mod = (xfs_extlen_t)(args.prod - args.mod);
3710 } else if (mp->m_sb.sb_blocksize >= PAGE_SIZE) {
3711 args.prod = 1;
3712 args.mod = 0;
3713 } else {
3714 args.prod = PAGE_SIZE >> mp->m_sb.sb_blocklog;
3715 if ((args.mod = (xfs_extlen_t)(do_mod(ap->offset, args.prod))))
3716 args.mod = (xfs_extlen_t)(args.prod - args.mod);
3719 * If we are not low on available data blocks, and the
3720 * underlying logical volume manager is a stripe, and
3721 * the file offset is zero then try to allocate data
3722 * blocks on stripe unit boundary.
3723 * NOTE: ap->aeof is only set if the allocation length
3724 * is >= the stripe unit and the allocation offset is
3725 * at the end of file.
3727 if (!ap->dfops->dop_low && ap->aeof) {
3729 args.alignment = stripe_align;
3733 * Adjust for alignment
3735 if (blen > args.alignment && blen <= args.maxlen)
3736 args.minlen = blen - args.alignment;
3737 args.minalignslop = 0;
3740 * First try an exact bno allocation.
3741 * If it fails then do a near or start bno
3742 * allocation with alignment turned on.
3746 args.type = XFS_ALLOCTYPE_THIS_BNO;
3749 * Compute the minlen+alignment for the
3750 * next case. Set slop so that the value
3751 * of minlen+alignment+slop doesn't go up
3752 * between the calls.
3754 if (blen > stripe_align && blen <= args.maxlen)
3755 nextminlen = blen - stripe_align;
3757 nextminlen = args.minlen;
3758 if (nextminlen + stripe_align > args.minlen + 1)
3759 args.minalignslop =
3760 nextminlen + stripe_align -
3761 args.minlen - 1;
3762 else
3763 args.minalignslop = 0;
3767 args.minalignslop = 0;
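/*
 * Editorial note (hedged): the slop keeps minlen + alignment + slop
 * constant across the two attempts, so the exact-bno try cannot
 * reserve less space than the aligned retry would need. E.g. with
 * stripe_align = 8 and a retry minlen of 16, a first-try minlen of 20
 * gets minalignslop = 16 + 8 - 20 - 1 = 3.
 */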
3769 args.minleft = ap->minleft;
3770 args.wasdel = ap->wasdel;
3771 args.resv = XFS_AG_RESV_NONE;
3772 args.datatype = ap->datatype;
3773 if (ap->datatype & XFS_ALLOC_USERDATA_ZERO)
3776 error = xfs_alloc_vextent(&args);
3780 if (tryagain && args.fsbno == NULLFSBLOCK) {
3782 * Exact allocation failed. Now try with alignment
3786 args.fsbno = ap->blkno;
3787 args.alignment = stripe_align;
3788 args.minlen = nextminlen;
3789 args.minalignslop = 0;
3791 if ((error = xfs_alloc_vextent(&args)))
3794 if (isaligned && args.fsbno == NULLFSBLOCK) {
3796 * allocation failed, so turn off alignment and
3800 args.fsbno = ap->blkno;
3802 if ((error = xfs_alloc_vextent(&args)))
3805 if (args.fsbno == NULLFSBLOCK && nullfb &&
3806 args.minlen > ap->minlen) {
3807 args.minlen = ap->minlen;
3808 args.type = XFS_ALLOCTYPE_START_BNO;
3809 args.fsbno = ap->blkno;
3810 if ((error = xfs_alloc_vextent(&args)))
3813 if (args.fsbno == NULLFSBLOCK && nullfb) {
3815 args.type = XFS_ALLOCTYPE_FIRST_AG;
3816 args.total = ap->minlen;
3817 if ((error = xfs_alloc_vextent(&args)))
3819 ap->dfops->dop_low = true;
3821 if (args.fsbno != NULLFSBLOCK) {
3823 * check the allocation happened at the same or higher AG than
3824 * the first block that was allocated.
3826 ASSERT(*ap->firstblock == NULLFSBLOCK ||
3827 XFS_FSB_TO_AGNO(mp, *ap->firstblock) <=
3828 XFS_FSB_TO_AGNO(mp, args.fsbno));
3830 ap->blkno = args.fsbno;
3831 if (*ap->firstblock == NULLFSBLOCK)
3832 *ap->firstblock = args.fsbno;
3833 ASSERT(nullfb || fb_agno <= args.agno);
3834 ap->length = args.len;
3835 if (!(ap->flags & XFS_BMAPI_COWFORK))
3836 ap->ip->i_d.di_nblocks += args.len;
3837 xfs_trans_log_inode(ap->tp, ap->ip, XFS_ILOG_CORE);
3839 ap->ip->i_delayed_blks -= args.len;
3841 * Adjust the disk quota also. This was reserved
3844 xfs_trans_mod_dquot_byino(ap->tp, ap->ip,
3845 ap->wasdel ? XFS_TRANS_DQ_DELBCOUNT :
3846 XFS_TRANS_DQ_BCOUNT,
3849 ap->blkno = NULLFSBLOCK;
3856 * For a remap operation, just "allocate" an extent at the address that the
3857 * caller passed in, and ensure that the AGFL is the right size. The caller
3858 * will then map the "allocated" extent into the file somewhere.
3861 xfs_bmap_remap_alloc(
3862 struct xfs_bmalloca *ap)
3864 struct xfs_trans *tp = ap->tp;
3865 struct xfs_mount *mp = tp->t_mountp;
3867 struct xfs_alloc_arg args;
3871 * validate that the block number is legal - this enables us to detect
3872 * and handle a silent filesystem corruption rather than crashing.
3874 memset(&args, 0, sizeof(struct xfs_alloc_arg));
3876 args.mp = ap->tp->t_mountp;
3877 bno = *ap->firstblock;
3878 args.agno = XFS_FSB_TO_AGNO(mp, bno);
3879 args.agbno = XFS_FSB_TO_AGBNO(mp, bno);
3880 if (args.agno >= mp->m_sb.sb_agcount ||
3881 args.agbno >= mp->m_sb.sb_agblocks)
3882 return -EFSCORRUPTED;
3884 /* "Allocate" the extent from the range we passed in. */
3885 trace_xfs_bmap_remap_alloc(ap->ip, *ap->firstblock, ap->length);
3887 ap->ip->i_d.di_nblocks += ap->length;
3888 xfs_trans_log_inode(ap->tp, ap->ip, XFS_ILOG_CORE);
3890 /* Fix the freelist, like a real allocator does. */
3891 args.datatype = ap->datatype;
3892 args.pag = xfs_perag_get(args.mp, args.agno);
3896 * The freelist fixing code will decline the allocation if
3897 * the size and shape of the free space doesn't allow for
3898 * allocating the extent and updating all the metadata that
3899 * happens during an allocation. We're remapping, not
3900 * allocating, so skip that check by pretending to be freeing.
3902 error = xfs_alloc_fix_freelist(&args, XFS_ALLOC_FLAG_FREEING);
3903 xfs_perag_put(args.pag);
3905 trace_xfs_bmap_remap_alloc_error(ap->ip, error, _RET_IP_);
3910 * xfs_bmap_alloc is called by xfs_bmapi to allocate an extent for a file.
3911 * It figures out where to ask the underlying allocator to put the new extent.
3915 struct xfs_bmalloca *ap) /* bmap alloc argument struct */
3917 if (ap->flags & XFS_BMAPI_REMAP)
3918 return xfs_bmap_remap_alloc(ap);
3919 if (XFS_IS_REALTIME_INODE(ap->ip) &&
3920 xfs_alloc_is_userdata(ap->datatype))
3921 return xfs_bmap_rtalloc(ap);
3922 return xfs_bmap_btalloc(ap);
3925 /* Trim extent to fit a logical block range. */
3928 struct xfs_bmbt_irec *irec,
3932 xfs_fileoff_t distance;
3933 xfs_fileoff_t end = bno + len;
3935 if (irec->br_startoff + irec->br_blockcount <= bno ||
3936 irec->br_startoff >= end) {
3937 irec->br_blockcount = 0;
3941 if (irec->br_startoff < bno) {
3942 distance = bno - irec->br_startoff;
3943 if (isnullstartblock(irec->br_startblock))
3944 irec->br_startblock = DELAYSTARTBLOCK;
3945 if (irec->br_startblock != DELAYSTARTBLOCK &&
3946 irec->br_startblock != HOLESTARTBLOCK)
3947 irec->br_startblock += distance;
3948 irec->br_startoff += distance;
3949 irec->br_blockcount -= distance;
3952 if (end < irec->br_startoff + irec->br_blockcount) {
3953 distance = irec->br_startoff + irec->br_blockcount - end;
3954 irec->br_blockcount -= distance;
3959 * Trim the returned map to the required bounds
3963 struct xfs_bmbt_irec *mval,
3964 struct xfs_bmbt_irec *got,
3972 if ((flags & XFS_BMAPI_ENTIRE) ||
3973 got->br_startoff + got->br_blockcount <= obno) {
3975 if (isnullstartblock(got->br_startblock))
3976 mval->br_startblock = DELAYSTARTBLOCK;
3982 ASSERT((*bno >= obno) || (n == 0));
3984 mval->br_startoff = *bno;
3985 if (isnullstartblock(got->br_startblock))
3986 mval->br_startblock = DELAYSTARTBLOCK;
3988 mval->br_startblock = got->br_startblock +
3989 (*bno - got->br_startoff);
3991 * Return the minimum of what we got and what we asked for for
3992 * the length. We can use the len variable here because it is
3993 * modified below and we could have been there before coming
3994 * here if the first part of the allocation didn't overlap what
3995 * was asked for.
3997 mval->br_blockcount = XFS_FILBLKS_MIN(end - *bno,
3998 got->br_blockcount - (*bno - got->br_startoff));
3999 mval->br_state = got->br_state;
4000 ASSERT(mval->br_blockcount <= len);
4005 * Update and validate the extent map to return
4008 xfs_bmapi_update_map(
4009 struct xfs_bmbt_irec **map,
4017 xfs_bmbt_irec_t *mval = *map;
4019 ASSERT((flags & XFS_BMAPI_ENTIRE) ||
4020 ((mval->br_startoff + mval->br_blockcount) <= end));
4021 ASSERT((flags & XFS_BMAPI_ENTIRE) || (mval->br_blockcount <= *len) ||
4022 (mval->br_startoff < obno));
4024 *bno = mval->br_startoff + mval->br_blockcount;
4026 if (*n > 0 && mval->br_startoff == mval[-1].br_startoff) {
4027 /* update previous map with new information */
4028 ASSERT(mval->br_startblock == mval[-1].br_startblock);
4029 ASSERT(mval->br_blockcount > mval[-1].br_blockcount);
4030 ASSERT(mval->br_state == mval[-1].br_state);
4031 mval[-1].br_blockcount = mval->br_blockcount;
4032 mval[-1].br_state = mval->br_state;
4033 } else if (*n > 0 && mval->br_startblock != DELAYSTARTBLOCK &&
4034 mval[-1].br_startblock != DELAYSTARTBLOCK &&
4035 mval[-1].br_startblock != HOLESTARTBLOCK &&
4036 mval->br_startblock == mval[-1].br_startblock +
4037 mval[-1].br_blockcount &&
4038 ((flags & XFS_BMAPI_IGSTATE) ||
4039 mval[-1].br_state == mval->br_state)) {
4040 ASSERT(mval->br_startoff ==
4041 mval[-1].br_startoff + mval[-1].br_blockcount);
4042 mval[-1].br_blockcount += mval->br_blockcount;
4043 } else if (*n > 0 &&
4044 mval->br_startblock == DELAYSTARTBLOCK &&
4045 mval[-1].br_startblock == DELAYSTARTBLOCK &&
4046 mval->br_startoff ==
4047 mval[-1].br_startoff + mval[-1].br_blockcount) {
4048 mval[-1].br_blockcount += mval->br_blockcount;
4049 mval[-1].br_state = mval->br_state;
4050 } else if (!((*n == 0) &&
4051 ((mval->br_startoff + mval->br_blockcount) <=
4060 * Map file blocks to filesystem blocks without allocation.
4064 struct xfs_inode *ip,
4067 struct xfs_bmbt_irec *mval,
4071 struct xfs_mount *mp = ip->i_mount;
4072 struct xfs_ifork *ifp;
4073 struct xfs_bmbt_irec got;
4080 int whichfork = xfs_bmapi_whichfork(flags);
4083 ASSERT(!(flags & ~(XFS_BMAPI_ATTRFORK|XFS_BMAPI_ENTIRE|
4084 XFS_BMAPI_IGSTATE|XFS_BMAPI_COWFORK)));
4085 ASSERT(xfs_isilocked(ip, XFS_ILOCK_SHARED|XFS_ILOCK_EXCL));
4087 if (unlikely(XFS_TEST_ERROR(
4088 (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
4089 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE),
4090 mp, XFS_ERRTAG_BMAPIFORMAT, XFS_RANDOM_BMAPIFORMAT))) {
4091 XFS_ERROR_REPORT("xfs_bmapi_read", XFS_ERRLEVEL_LOW, mp);
4092 return -EFSCORRUPTED;
4095 if (XFS_FORCED_SHUTDOWN(mp))
4098 XFS_STATS_INC(mp, xs_blk_mapr);
4100 ifp = XFS_IFORK_PTR(ip, whichfork);
4102 /* No CoW fork? Return a hole. */
4103 if (whichfork == XFS_COW_FORK && !ifp) {
4104 mval->br_startoff = bno;
4105 mval->br_startblock = HOLESTARTBLOCK;
4106 mval->br_blockcount = len;
4107 mval->br_state = XFS_EXT_NORM;
4112 if (!(ifp->if_flags & XFS_IFEXTENTS)) {
4113 error = xfs_iread_extents(NULL, ip, whichfork);
4118 if (!xfs_iext_lookup_extent(ip, ifp, bno, &idx, &got))
4123 while (bno < end && n < *nmap) {
4124 /* Reading past eof, act as though there's a hole up to end. */
4126 got.br_startoff = end;
4127 if (got.br_startoff > bno) {
4128 /* Reading in a hole. */
4129 mval->br_startoff = bno;
4130 mval->br_startblock = HOLESTARTBLOCK;
4131 mval->br_blockcount =
4132 XFS_FILBLKS_MIN(len, got.br_startoff - bno);
4133 mval->br_state = XFS_EXT_NORM;
4134 bno += mval->br_blockcount;
4135 len -= mval->br_blockcount;
4141 /* set up the extent map to return. */
4142 xfs_bmapi_trim_map(mval, &got, &bno, len, obno, end, n, flags);
4143 xfs_bmapi_update_map(&mval, &bno, &len, obno, end, &n, flags);
4145 /* If we're done, stop now. */
4146 if (bno >= end || n >= *nmap)
4149 /* Else go on to the next record. */
4150 if (!xfs_iext_get_extent(ifp, ++idx, &got))
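/*
 * Usage sketch (editorial addition, hedged): the typical caller
 * pattern for the function above is
 *
 *	struct xfs_bmbt_irec	map;
 *	int			nmap = 1;
 *
 *	error = xfs_bmapi_read(ip, offset_fsb, count_fsb, &map, &nmap, 0);
 *
 * after which nmap says how many mappings came back, and holes are
 * reported with br_startblock == HOLESTARTBLOCK.
 */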
4158 * Add a delayed allocation extent to an inode. Blocks are reserved from the
4159 * global pool and the extent inserted into the inode in-core extent tree.
4161 * On entry, got refers to the first extent beyond the offset of the extent to
4162 * allocate or eof is specified if no such extent exists. On return, got refers
4163 * to the extent record that was inserted to the inode fork.
4165 * Note that the allocated extent may have been merged with contiguous extents
4166 * during insertion into the inode fork. Thus, got does not reflect the current
4167 * state of the inode fork on return. If necessary, the caller can use lastx to
4168 * look up the updated record in the inode fork.
4171 xfs_bmapi_reserve_delalloc(
4172 struct xfs_inode *ip,
4176 xfs_filblks_t prealloc,
4177 struct xfs_bmbt_irec *got,
4178 xfs_extnum_t *lastx,
4181 struct xfs_mount *mp = ip->i_mount;
4182 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
4184 xfs_extlen_t indlen;
4185 char rt = XFS_IS_REALTIME_INODE(ip);
4188 xfs_fileoff_t aoff = off;
4191 * Cap the alloc length. Keep track of prealloc so we know whether to
4192 * tag the inode before we return.
4194 alen = XFS_FILBLKS_MIN(len + prealloc, MAXEXTLEN);
4196 alen = XFS_FILBLKS_MIN(alen, got->br_startoff - aoff);
4197 if (prealloc && alen >= len)
4198 prealloc = alen - len;
4200 /* Figure out the extent size, adjust alen */
4201 if (whichfork == XFS_COW_FORK)
4202 extsz = xfs_get_cowextsz_hint(ip);
4204 extsz = xfs_get_extsz_hint(ip);
4206 struct xfs_bmbt_irec prev;
4208 if (!xfs_iext_get_extent(ifp, *lastx - 1, &prev))
4209 prev.br_startoff = NULLFILEOFF;
4211 error = xfs_bmap_extsize_align(mp, got, &prev, extsz, rt, eof,
4212 1, 0, &aoff, &alen);
4217 extsz = alen / mp->m_sb.sb_rextsize;
4220 * Make a transaction-less quota reservation for delayed allocation
4221 * blocks. This number gets adjusted later. We return if we haven't
4222 * allocated blocks already inside this loop.
4224 error = xfs_trans_reserve_quota_nblks(NULL, ip, (long)alen, 0,
4225 rt ? XFS_QMOPT_RES_RTBLKS : XFS_QMOPT_RES_REGBLKS);
4230 * Split changing sb for alen and indlen since they could be coming
4231 * from different places.
4233 indlen = (xfs_extlen_t)xfs_bmap_worst_indlen(ip, alen);
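/*
 * Editorial note (hedged): alen and indlen are debited separately
 * because they can come from different pools - on a realtime inode
 * alen is charged to the rt extent counters, while indlen (worst-case
 * bmbt blocks for a later conversion) always comes from the data
 * device's free-block pool.
 */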
4237 error = xfs_mod_frextents(mp, -((int64_t)extsz));
4239 error = xfs_mod_fdblocks(mp, -((int64_t)alen), false);
4243 goto out_unreserve_quota;
4245 error = xfs_mod_fdblocks(mp, -((int64_t)indlen), false);
4247 goto out_unreserve_blocks;
4250 ip->i_delayed_blks += alen;
4252 got->br_startoff = aoff;
4253 got->br_startblock = nullstartblock(indlen);
4254 got->br_blockcount = alen;
4255 got->br_state = XFS_EXT_NORM;
4257 xfs_bmap_add_extent_hole_delay(ip, whichfork, lastx, got);
4260 * Tag the inode if blocks were preallocated. Note that COW fork
4261 * preallocation can occur at the start or end of the extent, even when
4262 * prealloc == 0, so we must also check the aligned offset and length.
4264 if (whichfork == XFS_DATA_FORK && prealloc)
4265 xfs_inode_set_eofblocks_tag(ip);
4266 if (whichfork == XFS_COW_FORK && (prealloc || aoff < off || alen > len))
4267 xfs_inode_set_cowblocks_tag(ip);
4271 out_unreserve_blocks:
4273 xfs_mod_frextents(mp, extsz);
4275 xfs_mod_fdblocks(mp, alen, false);
4276 out_unreserve_quota:
4277 if (XFS_IS_QUOTA_ON(mp))
4278 xfs_trans_unreserve_quota_nblks(NULL, ip, (long)alen, 0, rt ?
4279 XFS_QMOPT_RES_RTBLKS : XFS_QMOPT_RES_REGBLKS);
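/*
 * Illustrative sketch, not part of the original source: how a buffered
 * write path might use xfs_bmapi_reserve_delalloc() to cover a hole.
 * "prealloc_blocks" is a placeholder for the caller's speculative
 * preallocation policy; the fork/offset/length arguments follow the
 * declaration above (some of whose parameter lines are elided here).
 */
static inline int
xfs_delalloc_reserve_example(
	struct xfs_inode	*ip,
	xfs_fileoff_t		offset_fsb,
	xfs_filblks_t		count_fsb,
	xfs_filblks_t		prealloc_blocks)
{
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
	struct xfs_bmbt_irec	got;
	xfs_extnum_t		idx;
	int			eof = 0;

	/* find the extent at or after offset_fsb; eof if none exists */
	if (!xfs_iext_lookup_extent(ip, ifp, offset_fsb, &idx, &got))
		eof = 1;

	/* reserve a delayed allocation over the hole before "got" */
	return xfs_bmapi_reserve_delalloc(ip, XFS_DATA_FORK, offset_fsb,
			count_fsb, prealloc_blocks, &got, &idx, eof);
}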
4285 struct xfs_bmalloca *bma)
4287 struct xfs_mount *mp = bma->ip->i_mount;
4288 int whichfork = xfs_bmapi_whichfork(bma->flags);
4289 struct xfs_ifork *ifp = XFS_IFORK_PTR(bma->ip, whichfork);
4290 int tmp_logflags = 0;
4293 ASSERT(bma->length > 0);
4296 * For the wasdelay case, we could also just allocate the range asked
4297 * for in this bmap call but that wouldn't be as good.
4300 bma->length = (xfs_extlen_t)bma->got.br_blockcount;
4301 bma->offset = bma->got.br_startoff;
4303 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, bma->idx - 1),
4307 bma->length = XFS_FILBLKS_MIN(bma->length, MAXEXTLEN);
4309 bma->length = XFS_FILBLKS_MIN(bma->length,
4310 bma->got.br_startoff - bma->offset);
4314 * Set the data type being allocated. For the data fork, the first data
4315 * in the file is treated differently to all other allocations. For the
4316 * attribute fork, we only need to ensure the allocated range is not on the busy list.
4319 if (!(bma->flags & XFS_BMAPI_METADATA)) {
4320 bma->datatype = XFS_ALLOC_NOBUSY;
4321 if (whichfork == XFS_DATA_FORK) {
4322 if (bma->offset == 0)
4323 bma->datatype |= XFS_ALLOC_INITIAL_USER_DATA;
4325 bma->datatype |= XFS_ALLOC_USERDATA;
4327 if (bma->flags & XFS_BMAPI_ZERO)
4328 bma->datatype |= XFS_ALLOC_USERDATA_ZERO;
4331 bma->minlen = (bma->flags & XFS_BMAPI_CONTIG) ? bma->length : 1;
4334 * We only want to do the alignment at EOF if it is userdata and the
4335 * allocation length is larger than a stripe unit.
4337 if (mp->m_dalign && bma->length >= mp->m_dalign &&
4338 !(bma->flags & XFS_BMAPI_METADATA) && whichfork == XFS_DATA_FORK) {
4339 error = xfs_bmap_isaeof(bma, whichfork);
4344 error = xfs_bmap_alloc(bma);
4349 bma->cur->bc_private.b.firstblock = *bma->firstblock;
4350 if (bma->blkno == NULLFSBLOCK)
4352 if ((ifp->if_flags & XFS_IFBROOT) && !bma->cur) {
4353 bma->cur = xfs_bmbt_init_cursor(mp, bma->tp, bma->ip, whichfork);
4354 bma->cur->bc_private.b.firstblock = *bma->firstblock;
4355 bma->cur->bc_private.b.dfops = bma->dfops;
4358 * Bump the number of extents we've allocated in this call.
4364 bma->cur->bc_private.b.flags =
4365 bma->wasdel ? XFS_BTCUR_BPRV_WASDEL : 0;
4367 bma->got.br_startoff = bma->offset;
4368 bma->got.br_startblock = bma->blkno;
4369 bma->got.br_blockcount = bma->length;
4370 bma->got.br_state = XFS_EXT_NORM;
4373 * In the data fork, a wasdelay extent has been initialized, so
4374 * it shouldn't be flagged as unwritten.
4376 * For the cow fork, however, we convert delalloc reservations
4377 * (extents allocated for speculative preallocation) to
4378 * allocated unwritten extents, and only convert the unwritten
4379 * extents to real extents when we're about to write the data.
4381 if ((!bma->wasdel || (bma->flags & XFS_BMAPI_COWFORK)) &&
4382 (bma->flags & XFS_BMAPI_PREALLOC) &&
4383 xfs_sb_version_hasextflgbit(&mp->m_sb))
4384 bma->got.br_state = XFS_EXT_UNWRITTEN;
4387 error = xfs_bmap_add_extent_delay_real(bma, whichfork);
4389 error = xfs_bmap_add_extent_hole_real(bma, whichfork);
4391 bma->logflags |= tmp_logflags;
4396 * Update our extent pointer, given that xfs_bmap_add_extent_delay_real
4397 * or xfs_bmap_add_extent_hole_real might have merged it into one of
4398 * the neighbouring ones.
4400 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, bma->idx), &bma->got);
4402 ASSERT(bma->got.br_startoff <= bma->offset);
4403 ASSERT(bma->got.br_startoff + bma->got.br_blockcount >=
4404 bma->offset + bma->length);
4405 ASSERT(bma->got.br_state == XFS_EXT_NORM ||
4406 bma->got.br_state == XFS_EXT_UNWRITTEN);
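/*
 * Worked example of the state selection above (added annotation, not in
 * the original): a data fork delalloc conversion (wasdel set, no
 * XFS_BMAPI_COWFORK) stays XFS_EXT_NORM because those blocks were
 * initialized when the delalloc data was written; a COW fork
 * XFS_BMAPI_PREALLOC allocation comes out XFS_EXT_UNWRITTEN and is only
 * flipped to normal once the data is about to be written.
 */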
4411 xfs_bmapi_convert_unwritten(
4412 struct xfs_bmalloca *bma,
4413 struct xfs_bmbt_irec *mval,
4417 int whichfork = xfs_bmapi_whichfork(flags);
4418 struct xfs_ifork *ifp = XFS_IFORK_PTR(bma->ip, whichfork);
4419 int tmp_logflags = 0;
4422 /* check if we need to do unwritten->real conversion */
4423 if (mval->br_state == XFS_EXT_UNWRITTEN &&
4424 (flags & XFS_BMAPI_PREALLOC))
4427 /* check if we need to do real->unwritten conversion */
4428 if (mval->br_state == XFS_EXT_NORM &&
4429 (flags & (XFS_BMAPI_PREALLOC | XFS_BMAPI_CONVERT)) !=
4430 (XFS_BMAPI_PREALLOC | XFS_BMAPI_CONVERT))
4434 * Toggle the extent state between unwritten and normal.
4436 ASSERT(mval->br_blockcount <= len);
4437 if ((ifp->if_flags & XFS_IFBROOT) && !bma->cur) {
4438 bma->cur = xfs_bmbt_init_cursor(bma->ip->i_mount, bma->tp,
4439 bma->ip, whichfork);
4440 bma->cur->bc_private.b.firstblock = *bma->firstblock;
4441 bma->cur->bc_private.b.dfops = bma->dfops;
4443 mval->br_state = (mval->br_state == XFS_EXT_UNWRITTEN)
4444 ? XFS_EXT_NORM : XFS_EXT_UNWRITTEN;
4447 * Before insertion into the bmbt, zero the range being converted
4450 if (flags & XFS_BMAPI_ZERO) {
4451 error = xfs_zero_extent(bma->ip, mval->br_startblock,
4452 mval->br_blockcount);
4457 error = xfs_bmap_add_extent_unwritten_real(bma->tp, bma->ip, whichfork,
4458 &bma->idx, &bma->cur, mval, bma->firstblock, bma->dfops,
4461 * Log the inode core unconditionally in the unwritten extent conversion
4462 * path because the conversion might not have done so (e.g., if the
4463 * extent count hasn't changed). We need to make sure the inode is dirty
4464 * in the transaction for the sake of fsync(), even if nothing has
4465 * changed, because fsync() will not force the log for this transaction
4466 * unless it sees the inode pinned.
4468 * Note: If we're only converting cow fork extents, there aren't
4469 * any on-disk updates to make, so we don't need to log anything.
4471 if (whichfork != XFS_COW_FORK)
4472 bma->logflags |= tmp_logflags | XFS_ILOG_CORE;
4477 * Update our extent pointer, given that
4478 * xfs_bmap_add_extent_unwritten_real might have merged it into one
4479 * of the neighbouring ones.
4481 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, bma->idx), &bma->got);
4484 * We may have combined previously unwritten space with written space,
4485 * so generate another request.
4487 if (mval->br_blockcount < len)
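/*
 * Added note, not in the original: the -EAGAIN produced when the
 * conversion covers less than the requested length is consumed by the
 * xfs_bmapi_write() loop below, which trims the map and retries the
 * conversion for the remainder of the range.
 */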
4493 * Map file blocks to filesystem blocks, and allocate blocks or convert the
4494 * extent state if necessary. Detailed behaviour is controlled by the flags
4495 * parameter. Only allocates blocks from a single allocation group, to avoid locking problems.
4498 * The returned value in "firstblock" from the first call in a transaction
4499 * must be remembered and presented to subsequent calls in "firstblock".
4500 * An upper bound for the number of blocks to be allocated is supplied to
4501 * the first call in "total"; if no allocation group has that many free
4502 * blocks then the call will fail (return NULLFSBLOCK in "firstblock").
4506 struct xfs_trans *tp, /* transaction pointer */
4507 struct xfs_inode *ip, /* incore inode */
4508 xfs_fileoff_t bno, /* starting file offs. mapped */
4509 xfs_filblks_t len, /* length to map in file */
4510 int flags, /* XFS_BMAPI_... */
4511 xfs_fsblock_t *firstblock, /* first allocated block
4512 controls a.g. for allocs */
4513 xfs_extlen_t total, /* total blocks needed */
4514 struct xfs_bmbt_irec *mval, /* output: map values */
4515 int *nmap, /* i/o: mval size/count */
4516 struct xfs_defer_ops *dfops) /* i/o: list extents to free */
4518 struct xfs_mount *mp = ip->i_mount;
4519 struct xfs_ifork *ifp;
4520 struct xfs_bmalloca bma = { NULL }; /* args for xfs_bmap_alloc */
4521 xfs_fileoff_t end; /* end of mapped file region */
4522 bool eof = false; /* after the end of extents */
4523 int error; /* error return */
4524 int n; /* current extent index */
4525 xfs_fileoff_t obno; /* old block number (offset) */
4526 int whichfork; /* data or attr fork */
4529 xfs_fileoff_t orig_bno; /* original block number value */
4530 int orig_flags; /* original flags arg value */
4531 xfs_filblks_t orig_len; /* original value of len arg */
4532 struct xfs_bmbt_irec *orig_mval; /* original value of mval */
4533 int orig_nmap; /* original value of *nmap */
4541 whichfork = xfs_bmapi_whichfork(flags);
4544 ASSERT(*nmap <= XFS_BMAP_MAX_NMAP);
4545 ASSERT(!(flags & XFS_BMAPI_IGSTATE));
4546 ASSERT(tp != NULL ||
4547 (flags & (XFS_BMAPI_CONVERT | XFS_BMAPI_COWFORK)) ==
4548 (XFS_BMAPI_CONVERT | XFS_BMAPI_COWFORK));
4550 ASSERT(XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_LOCAL);
4551 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
4552 ASSERT(!(flags & XFS_BMAPI_REMAP) || whichfork == XFS_DATA_FORK);
4553 ASSERT(!(flags & XFS_BMAPI_PREALLOC) || !(flags & XFS_BMAPI_REMAP));
4554 ASSERT(!(flags & XFS_BMAPI_CONVERT) || !(flags & XFS_BMAPI_REMAP));
4556 /* zeroing is currently only for data extents, not metadata */
4557 ASSERT((flags & (XFS_BMAPI_METADATA | XFS_BMAPI_ZERO)) !=
4558 (XFS_BMAPI_METADATA | XFS_BMAPI_ZERO));
4560 * we can allocate unwritten extents or pre-zero allocated blocks,
4561 * but it makes no sense to do both at once. This would result in
4562 * zeroing the unwritten extent twice, while it would still remain
4563 * an unwritten extent....
4565 ASSERT((flags & (XFS_BMAPI_PREALLOC | XFS_BMAPI_ZERO)) !=
4566 (XFS_BMAPI_PREALLOC | XFS_BMAPI_ZERO));
4568 if (unlikely(XFS_TEST_ERROR(
4569 (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
4570 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE),
4571 mp, XFS_ERRTAG_BMAPIFORMAT, XFS_RANDOM_BMAPIFORMAT))) {
4572 XFS_ERROR_REPORT("xfs_bmapi_write", XFS_ERRLEVEL_LOW, mp);
4573 return -EFSCORRUPTED;
4576 if (XFS_FORCED_SHUTDOWN(mp))
4579 ifp = XFS_IFORK_PTR(ip, whichfork);
4581 XFS_STATS_INC(mp, xs_blk_mapw);
4583 if (*firstblock == NULLFSBLOCK) {
4584 if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE)
4585 bma.minleft = be16_to_cpu(ifp->if_broot->bb_level) + 1;
4592 if (!(ifp->if_flags & XFS_IFEXTENTS)) {
4593 error = xfs_iread_extents(tp, ip, whichfork);
4602 if (!xfs_iext_lookup_extent(ip, ifp, bno, &bma.idx, &bma.got))
4604 if (!xfs_iext_get_extent(ifp, bma.idx - 1, &bma.prev))
4605 bma.prev.br_startoff = NULLFILEOFF;
4611 bma.firstblock = firstblock;
4613 while (bno < end && n < *nmap) {
4614 bool need_alloc = false, wasdelay = false;
4616 /* in a hole or beyond EOF? */
4617 if (eof || bma.got.br_startoff > bno) {
4618 if (flags & XFS_BMAPI_DELALLOC) {
4620 * For the COW fork we can reasonably get a
4621 * request for converting an extent that races
4622 * with other threads that have already converted
4623 * part of it, since converting COW blocks to
4624 * regular blocks is not protected by the IOLOCK.
4627 ASSERT(flags & XFS_BMAPI_COWFORK);
4628 if (!(flags & XFS_BMAPI_COWFORK)) {
4633 if (eof || bno >= end)
4640 * Make sure we only reflink into a hole.
4642 ASSERT(!(flags & XFS_BMAPI_REMAP));
4643 if (isnullstartblock(bma.got.br_startblock))
4648 * First, deal with the hole before the allocated space
4649 * that we found, if any.
4651 if (need_alloc || wasdelay) {
4653 bma.conv = !!(flags & XFS_BMAPI_CONVERT);
4654 bma.wasdel = wasdelay;
4659 * There's a 32/64 bit type mismatch between the
4660 * allocation length request (which can be 64 bits in
4661 * length) and the bma length request, which is
4662 * xfs_extlen_t and therefore 32 bits. Hence we have to
4663 * check for 32-bit overflows and handle them here.
4665 if (len > (xfs_filblks_t)MAXEXTLEN)
4666 bma.length = MAXEXTLEN;
4671 ASSERT(bma.length > 0);
4672 error = xfs_bmapi_allocate(&bma);
4675 if (bma.blkno == NULLFSBLOCK)
4679 * If this is a CoW allocation, record the data in
4680 * the refcount btree for orphan recovery.
4682 if (whichfork == XFS_COW_FORK) {
4683 error = xfs_refcount_alloc_cow_extent(mp, dfops,
4684 bma.blkno, bma.length);
4690 /* Deal with the allocated space we found. */
4691 xfs_bmapi_trim_map(mval, &bma.got, &bno, len, obno,
4694 /* Execute unwritten extent conversion if necessary */
4695 error = xfs_bmapi_convert_unwritten(&bma, mval, len, flags);
4696 if (error == -EAGAIN)
4701 /* update the extent map to return */
4702 xfs_bmapi_update_map(&mval, &bno, &len, obno, end, &n, flags);
4705 * If we're done, stop now. Stop when we've allocated
4706 * XFS_BMAP_MAX_NMAP extents no matter what. Otherwise
4707 * the transaction may get too big.
4709 if (bno >= end || n >= *nmap || bma.nallocs >= *nmap)
4712 /* Else go on to the next record. */
4714 if (!xfs_iext_get_extent(ifp, ++bma.idx, &bma.got))
4720 * Transform from btree to extents, give it cur.
4722 if (xfs_bmap_wants_extents(ip, whichfork)) {
4723 int tmp_logflags = 0;
4726 error = xfs_bmap_btree_to_extents(tp, ip, bma.cur,
4727 &tmp_logflags, whichfork);
4728 bma.logflags |= tmp_logflags;
4733 ASSERT(XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE ||
4734 XFS_IFORK_NEXTENTS(ip, whichfork) >
4735 XFS_IFORK_MAXEXT(ip, whichfork));
4739 * Log everything. Do this after conversion, there's no point in
4740 * logging the extent records if we've converted to btree format.
4742 if ((bma.logflags & xfs_ilog_fext(whichfork)) &&
4743 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS)
4744 bma.logflags &= ~xfs_ilog_fext(whichfork);
4745 else if ((bma.logflags & xfs_ilog_fbroot(whichfork)) &&
4746 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE)
4747 bma.logflags &= ~xfs_ilog_fbroot(whichfork);
4749 * Log whatever the flags say, even if error. Otherwise we might miss
4750 * detecting a case where the data changed, an error occurred, and
4751 * nothing was logged, so we wouldn't shut down when we should.
4754 xfs_trans_log_inode(tp, ip, bma.logflags);
4758 ASSERT(*firstblock == NULLFSBLOCK ||
4759 XFS_FSB_TO_AGNO(mp, *firstblock) <=
4761 bma.cur->bc_private.b.firstblock));
4762 *firstblock = bma.cur->bc_private.b.firstblock;
4764 xfs_btree_del_cursor(bma.cur,
4765 error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
4768 xfs_bmap_validate_ret(orig_bno, orig_len, orig_flags, orig_mval,
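/*
 * Illustrative sketch, not part of the original source: the canonical
 * allocation pattern described in the comment above xfs_bmapi_write().
 * firstblock seeds the deferred-ops chain and constrains the AG for all
 * allocations in this transaction; "resblks" is a placeholder for the
 * caller's block reservation. Finishing the dfops and committing the
 * transaction are assumed to happen in the (hypothetical) caller.
 */
static inline int
xfs_bmapi_write_example(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	xfs_fileoff_t		offset_fsb,
	xfs_filblks_t		count_fsb,
	xfs_extlen_t		resblks,
	struct xfs_defer_ops	*dfops,
	xfs_fsblock_t		*firstblock)
{
	struct xfs_bmbt_irec	imap;
	int			nimaps = 1;

	return xfs_bmapi_write(tp, ip, offset_fsb, count_fsb,
			XFS_BMAPI_PREALLOC, firstblock, resblks,
			&imap, &nimaps, dfops);
}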
4774 * When a delalloc extent is split (e.g., due to a hole punch), the original
4775 * indlen reservation must be shared across the two new extents that are left behind.
4778 * Given the original reservation and the worst case indlen for the two new
4779 * extents (as calculated by xfs_bmap_worst_indlen()), split the original
4780 * reservation fairly across the two new extents. If necessary, steal available
4781 * blocks from a deleted extent to make up a reservation deficiency (e.g., if
4782 * ores == 1). The number of stolen blocks is returned. The availability and
4783 * subsequent accounting of stolen blocks is the responsibility of the caller.
4785 static xfs_filblks_t
4786 xfs_bmap_split_indlen(
4787 xfs_filblks_t ores, /* original res. */
4788 xfs_filblks_t *indlen1, /* ext1 worst indlen */
4789 xfs_filblks_t *indlen2, /* ext2 worst indlen */
4790 xfs_filblks_t avail) /* stealable blocks */
4792 xfs_filblks_t len1 = *indlen1;
4793 xfs_filblks_t len2 = *indlen2;
4794 xfs_filblks_t nres = len1 + len2; /* new total res. */
4795 xfs_filblks_t stolen = 0;
4796 xfs_filblks_t resfactor;
4799 * Steal as many blocks as we can to try and satisfy the worst case
4800 * indlen for both new extents.
4802 if (ores < nres && avail)
4803 stolen = XFS_FILBLKS_MIN(nres - ores, avail);
4806 /* nothing else to do if we've satisfied the new reservation */
4811 * We can't meet the total required reservation for the two extents.
4812 * Calculate the percent of the overall shortage between both extents
4813 * and apply this percentage to each of the requested indlen values.
4814 * This distributes the shortage fairly and reduces the chances that one
4815 * of the two extents is left with nothing when extents are repeatedly
4818 resfactor = (ores * 100);
4819 do_div(resfactor, nres);
4824 ASSERT(len1 + len2 <= ores);
4825 ASSERT(len1 < *indlen1 && len2 < *indlen2);
4828 * Hand out the remainder to each extent. If one of the two reservations
4829 * is zero, we want to make sure that one gets a block first. The loop
4830 * below starts with len1, so hand len2 a block right off the bat if it is zero.
4833 ores -= (len1 + len2);
4834 ASSERT((*indlen1 - len1) + (*indlen2 - len2) >= ores);
4835 if (ores && !len2 && *indlen2) {
4840 if (len1 < *indlen1) {
4846 if (len2 < *indlen2) {
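/*
 * Worked example (added annotation, not in the original): with
 * ores = 10, *indlen1 = 8, *indlen2 = 4 and avail = 0, nothing can be
 * stolen and nres = 12 > ores, so resfactor = 10 * 100 / 12 = 83 and
 * len1 = 8 * 83 / 100 = 6, len2 = 4 * 83 / 100 = 3. The remaining
 * ores - (6 + 3) = 1 block is handed out by the loop above starting
 * with len1, for a final split of 7 and 3 blocks.
 */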
4859 xfs_bmap_del_extent_delay(
4860 struct xfs_inode *ip,
4863 struct xfs_bmbt_irec *got,
4864 struct xfs_bmbt_irec *del)
4866 struct xfs_mount *mp = ip->i_mount;
4867 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
4868 struct xfs_bmbt_irec new;
4869 int64_t da_old, da_new, da_diff = 0;
4870 xfs_fileoff_t del_endoff, got_endoff;
4871 xfs_filblks_t got_indlen, new_indlen, stolen;
4872 int error = 0, state = 0;
4875 XFS_STATS_INC(mp, xs_del_exlist);
4877 isrt = (whichfork == XFS_DATA_FORK) && XFS_IS_REALTIME_INODE(ip);
4878 del_endoff = del->br_startoff + del->br_blockcount;
4879 got_endoff = got->br_startoff + got->br_blockcount;
4880 da_old = startblockval(got->br_startblock);
4884 ASSERT(*idx <= xfs_iext_count(ifp));
4885 ASSERT(del->br_blockcount > 0);
4886 ASSERT(got->br_startoff <= del->br_startoff);
4887 ASSERT(got_endoff >= del_endoff);
4890 int64_t rtexts = XFS_FSB_TO_B(mp, del->br_blockcount);
4892 do_div(rtexts, mp->m_sb.sb_rextsize);
4893 xfs_mod_frextents(mp, rtexts);
4897 * Update the inode delalloc counter now and wait to update the
4898 * sb counters as we might have to borrow some blocks for the
4899 * indirect block accounting.
4901 error = xfs_trans_reserve_quota_nblks(NULL, ip,
4902 -((long)del->br_blockcount), 0,
4903 isrt ? XFS_QMOPT_RES_RTBLKS : XFS_QMOPT_RES_REGBLKS);
4906 ip->i_delayed_blks -= del->br_blockcount;
4908 if (whichfork == XFS_COW_FORK)
4909 state |= BMAP_COWFORK;
4911 if (got->br_startoff == del->br_startoff)
4912 state |= BMAP_LEFT_CONTIG;
4913 if (got_endoff == del_endoff)
4914 state |= BMAP_RIGHT_CONTIG;
4916 switch (state & (BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG)) {
4917 case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
4919 * Matches the whole extent. Delete the entry.
4921 xfs_iext_remove(ip, *idx, 1, state);
4924 case BMAP_LEFT_CONTIG:
4926 * Deleting the first part of the extent.
4928 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
4929 got->br_startoff = del_endoff;
4930 got->br_blockcount -= del->br_blockcount;
4931 da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip,
4932 got->br_blockcount), da_old);
4933 got->br_startblock = nullstartblock((int)da_new);
4934 xfs_bmbt_set_all(xfs_iext_get_ext(ifp, *idx), got);
4935 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
4937 case BMAP_RIGHT_CONTIG:
4939 * Deleting the last part of the extent.
4941 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
4942 got->br_blockcount = got->br_blockcount - del->br_blockcount;
4943 da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip,
4944 got->br_blockcount), da_old);
4945 got->br_startblock = nullstartblock((int)da_new);
4946 xfs_bmbt_set_all(xfs_iext_get_ext(ifp, *idx), got);
4947 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
4951 * Deleting the middle of the extent.
4953 * Distribute the original indlen reservation across the two new
4954 * extents. Steal blocks from the deleted extent if necessary.
4955 * Stealing blocks simply fudges the fdblocks accounting below.
4956 * Warn if either of the new indlen reservations is zero as this
4957 * can lead to delalloc problems.
4959 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
4961 got->br_blockcount = del->br_startoff - got->br_startoff;
4962 got_indlen = xfs_bmap_worst_indlen(ip, got->br_blockcount);
4964 new.br_blockcount = got_endoff - del_endoff;
4965 new_indlen = xfs_bmap_worst_indlen(ip, new.br_blockcount);
4967 WARN_ON_ONCE(!got_indlen || !new_indlen);
4968 stolen = xfs_bmap_split_indlen(da_old, &got_indlen, &new_indlen,
4969 del->br_blockcount);
4971 got->br_startblock = nullstartblock((int)got_indlen);
4972 xfs_bmbt_set_all(xfs_iext_get_ext(ifp, *idx), got);
4973 trace_xfs_bmap_post_update(ip, *idx, 0, _THIS_IP_);
4975 new.br_startoff = del_endoff;
4976 new.br_state = got->br_state;
4977 new.br_startblock = nullstartblock((int)new_indlen);
4980 xfs_iext_insert(ip, *idx, 1, &new, state);
4982 da_new = got_indlen + new_indlen - stolen;
4983 del->br_blockcount -= stolen;
4987 ASSERT(da_old >= da_new);
4988 da_diff = da_old - da_new;
4990 da_diff += del->br_blockcount;
4992 xfs_mod_fdblocks(mp, da_diff, false);
4997 xfs_bmap_del_extent_cow(
4998 struct xfs_inode *ip,
5000 struct xfs_bmbt_irec *got,
5001 struct xfs_bmbt_irec *del)
5003 struct xfs_mount *mp = ip->i_mount;
5004 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, XFS_COW_FORK);
5005 struct xfs_bmbt_irec new;
5006 xfs_fileoff_t del_endoff, got_endoff;
5007 int state = BMAP_COWFORK;
5009 XFS_STATS_INC(mp, xs_del_exlist);
5011 del_endoff = del->br_startoff + del->br_blockcount;
5012 got_endoff = got->br_startoff + got->br_blockcount;
5015 ASSERT(*idx <= xfs_iext_count(ifp));
5016 ASSERT(del->br_blockcount > 0);
5017 ASSERT(got->br_startoff <= del->br_startoff);
5018 ASSERT(got_endoff >= del_endoff);
5019 ASSERT(!isnullstartblock(got->br_startblock));
5021 if (got->br_startoff == del->br_startoff)
5022 state |= BMAP_LEFT_CONTIG;
5023 if (got_endoff == del_endoff)
5024 state |= BMAP_RIGHT_CONTIG;
5026 switch (state & (BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG)) {
5027 case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
5029 * Matches the whole extent. Delete the entry.
5031 xfs_iext_remove(ip, *idx, 1, state);
5034 case BMAP_LEFT_CONTIG:
5036 * Deleting the first part of the extent.
5038 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
5039 got->br_startoff = del_endoff;
5040 got->br_blockcount -= del->br_blockcount;
5041 got->br_startblock = del->br_startblock + del->br_blockcount;
5042 xfs_bmbt_set_all(xfs_iext_get_ext(ifp, *idx), got);
5043 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
5045 case BMAP_RIGHT_CONTIG:
5047 * Deleting the last part of the extent.
5049 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
5050 got->br_blockcount -= del->br_blockcount;
5051 xfs_bmbt_set_all(xfs_iext_get_ext(ifp, *idx), got);
5052 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
5056 * Deleting the middle of the extent.
5058 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
5059 got->br_blockcount = del->br_startoff - got->br_startoff;
5060 xfs_bmbt_set_all(xfs_iext_get_ext(ifp, *idx), got);
5061 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
5063 new.br_startoff = del_endoff;
5064 new.br_blockcount = got_endoff - del_endoff;
5065 new.br_state = got->br_state;
5066 new.br_startblock = del->br_startblock + del->br_blockcount;
5069 xfs_iext_insert(ip, *idx, 1, &new, state);
5075 * Called by xfs_bmapi to update file extent records and the btree
5076 * after removing space (or undoing a delayed allocation).
5078 STATIC int /* error */
5079 xfs_bmap_del_extent(
5080 xfs_inode_t *ip, /* incore inode pointer */
5081 xfs_trans_t *tp, /* current transaction pointer */
5082 xfs_extnum_t *idx, /* extent number to update/delete */
5083 struct xfs_defer_ops *dfops, /* list of extents to be freed */
5084 xfs_btree_cur_t *cur, /* if null, not a btree */
5085 xfs_bmbt_irec_t *del, /* data to remove from extents */
5086 int *logflagsp, /* inode logging flags */
5087 int whichfork, /* data or attr fork */
5088 int bflags) /* bmapi flags */
5090 xfs_filblks_t da_new; /* new delay-alloc indirect blocks */
5091 xfs_filblks_t da_old; /* old delay-alloc indirect blocks */
5092 xfs_fsblock_t del_endblock=0; /* first block past del */
5093 xfs_fileoff_t del_endoff; /* first offset past del */
5094 int delay; /* current block is delayed allocated */
5095 int do_fx; /* free extent at end of routine */
5096 xfs_bmbt_rec_host_t *ep; /* current extent entry pointer */
5097 int error; /* error return value */
5098 int flags; /* inode logging flags */
5099 xfs_bmbt_irec_t got; /* current extent entry */
5100 xfs_fileoff_t got_endoff; /* first offset past got */
5101 int i; /* temp state */
5102 xfs_ifork_t *ifp; /* inode fork pointer */
5103 xfs_mount_t *mp; /* mount structure */
5104 xfs_filblks_t nblks; /* quota/sb block count */
5105 xfs_bmbt_irec_t new; /* new record to be inserted */
5107 uint qfield; /* quota field to update */
5108 xfs_filblks_t temp; /* for indirect length calculations */
5109 xfs_filblks_t temp2; /* for indirect length calculations */
5113 XFS_STATS_INC(mp, xs_del_exlist);
5115 if (whichfork == XFS_ATTR_FORK)
5116 state |= BMAP_ATTRFORK;
5117 else if (whichfork == XFS_COW_FORK)
5118 state |= BMAP_COWFORK;
5120 ifp = XFS_IFORK_PTR(ip, whichfork);
5121 ASSERT((*idx >= 0) && (*idx < xfs_iext_count(ifp)));
5122 ASSERT(del->br_blockcount > 0);
5123 ep = xfs_iext_get_ext(ifp, *idx);
5124 xfs_bmbt_get_all(ep, &got);
5125 ASSERT(got.br_startoff <= del->br_startoff);
5126 del_endoff = del->br_startoff + del->br_blockcount;
5127 got_endoff = got.br_startoff + got.br_blockcount;
5128 ASSERT(got_endoff >= del_endoff);
5129 delay = isnullstartblock(got.br_startblock);
5130 ASSERT(isnullstartblock(del->br_startblock) == delay);
5135 * If deleting a real allocation, we must free up the disk space.
5138 flags = XFS_ILOG_CORE;
5140 * Realtime allocation. Free it and record di_nblocks update.
5142 if (whichfork == XFS_DATA_FORK && XFS_IS_REALTIME_INODE(ip)) {
5146 ASSERT(do_mod(del->br_blockcount,
5147 mp->m_sb.sb_rextsize) == 0);
5148 ASSERT(do_mod(del->br_startblock,
5149 mp->m_sb.sb_rextsize) == 0);
5150 bno = del->br_startblock;
5151 len = del->br_blockcount;
5152 do_div(bno, mp->m_sb.sb_rextsize);
5153 do_div(len, mp->m_sb.sb_rextsize);
5154 error = xfs_rtfree_extent(tp, bno, (xfs_extlen_t)len);
5158 nblks = len * mp->m_sb.sb_rextsize;
5159 qfield = XFS_TRANS_DQ_RTBCOUNT;
5162 * Ordinary allocation.
5166 nblks = del->br_blockcount;
5167 qfield = XFS_TRANS_DQ_BCOUNT;
5170 * Set up del_endblock and cur for later.
5172 del_endblock = del->br_startblock + del->br_blockcount;
5174 if ((error = xfs_bmbt_lookup_eq(cur, got.br_startoff,
5175 got.br_startblock, got.br_blockcount,
5178 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
5180 da_old = da_new = 0;
5182 da_old = startblockval(got.br_startblock);
5189 * Set flag value to use in switch statement.
5190 * Left-contig is 2, right-contig is 1.
5192 switch (((got.br_startoff == del->br_startoff) << 1) |
5193 (got_endoff == del_endoff)) {
5196 * Matches the whole extent. Delete the entry.
5198 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
5199 xfs_iext_remove(ip, *idx, 1,
5200 whichfork == XFS_ATTR_FORK ? BMAP_ATTRFORK : 0);
5205 XFS_IFORK_NEXT_SET(ip, whichfork,
5206 XFS_IFORK_NEXTENTS(ip, whichfork) - 1);
5207 flags |= XFS_ILOG_CORE;
5209 flags |= xfs_ilog_fext(whichfork);
5212 if ((error = xfs_btree_delete(cur, &i)))
5214 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
5219 * Deleting the first part of the extent.
5221 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
5222 xfs_bmbt_set_startoff(ep, del_endoff);
5223 temp = got.br_blockcount - del->br_blockcount;
5224 xfs_bmbt_set_blockcount(ep, temp);
5226 temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
5228 xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));
5229 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
5233 xfs_bmbt_set_startblock(ep, del_endblock);
5234 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
5236 flags |= xfs_ilog_fext(whichfork);
5239 if ((error = xfs_bmbt_update(cur, del_endoff, del_endblock,
5240 got.br_blockcount - del->br_blockcount,
5247 * Deleting the last part of the extent.
5249 temp = got.br_blockcount - del->br_blockcount;
5250 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
5251 xfs_bmbt_set_blockcount(ep, temp);
5253 temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
5255 xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));
5256 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
5260 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
5262 flags |= xfs_ilog_fext(whichfork);
5265 if ((error = xfs_bmbt_update(cur, got.br_startoff,
5267 got.br_blockcount - del->br_blockcount,
5274 * Deleting the middle of the extent.
5276 temp = del->br_startoff - got.br_startoff;
5277 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
5278 xfs_bmbt_set_blockcount(ep, temp);
5279 new.br_startoff = del_endoff;
5280 temp2 = got_endoff - del_endoff;
5281 new.br_blockcount = temp2;
5282 new.br_state = got.br_state;
5284 new.br_startblock = del_endblock;
5285 flags |= XFS_ILOG_CORE;
5287 if ((error = xfs_bmbt_update(cur,
5289 got.br_startblock, temp,
5292 if ((error = xfs_btree_increment(cur, 0, &i)))
5294 cur->bc_rec.b = new;
5295 error = xfs_btree_insert(cur, &i);
5296 if (error && error != -ENOSPC)
5299 * If we get no-space back from the btree
5300 * insert, it tried a split and we have a
5301 * zero block reservation.
5302 * Fix up our state and return the error.
5304 if (error == -ENOSPC) {
5306 * Reset the cursor, don't trust
5307 * it after any insert operation.
5309 if ((error = xfs_bmbt_lookup_eq(cur,
5314 XFS_WANT_CORRUPTED_GOTO(mp,
5317 * Update the btree record back
5318 * to the original value.
5320 if ((error = xfs_bmbt_update(cur,
5327 * Reset the extent record back
5328 * to the original value.
5330 xfs_bmbt_set_blockcount(ep,
5336 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
5338 flags |= xfs_ilog_fext(whichfork);
5339 XFS_IFORK_NEXT_SET(ip, whichfork,
5340 XFS_IFORK_NEXTENTS(ip, whichfork) + 1);
5342 xfs_filblks_t stolen;
5343 ASSERT(whichfork == XFS_DATA_FORK);
5346 * Distribute the original indlen reservation across the
5347 * two new extents. Steal blocks from the deleted extent
5348 * if necessary. Stealing blocks simply fudges the
5349 * fdblocks accounting in xfs_bunmapi().
5351 temp = xfs_bmap_worst_indlen(ip, got.br_blockcount);
5352 temp2 = xfs_bmap_worst_indlen(ip, new.br_blockcount);
5353 stolen = xfs_bmap_split_indlen(da_old, &temp, &temp2,
5354 del->br_blockcount);
5355 da_new = temp + temp2 - stolen;
5356 del->br_blockcount -= stolen;
5359 * Set the reservation for each extent. Warn if either
5360 * is zero as this can lead to delalloc problems.
5362 WARN_ON_ONCE(!temp || !temp2);
5363 xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));
5364 new.br_startblock = nullstartblock((int)temp2);
5366 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
5367 xfs_iext_insert(ip, *idx + 1, 1, &new, state);
5372 /* remove reverse mapping */
5374 error = xfs_rmap_unmap_extent(mp, dfops, ip, whichfork, del);
5380 * If we need to, add to list of extents to delete.
5382 if (do_fx && !(bflags & XFS_BMAPI_REMAP)) {
5383 if (xfs_is_reflink_inode(ip) && whichfork == XFS_DATA_FORK) {
5384 error = xfs_refcount_decrease_extent(mp, dfops, del);
5388 xfs_bmap_add_free(mp, dfops, del->br_startblock,
5389 del->br_blockcount, NULL);
5393 * Adjust inode # blocks in the file.
5396 ip->i_d.di_nblocks -= nblks;
5398 * Adjust quota data.
5400 if (qfield && !(bflags & XFS_BMAPI_REMAP))
5401 xfs_trans_mod_dquot_byino(tp, ip, qfield, (long)-nblks);
5404 * Account for change in delayed indirect blocks.
5405 * Nothing to do for disk quota accounting here.
5407 ASSERT(da_old >= da_new);
5408 if (da_old > da_new)
5409 xfs_mod_fdblocks(mp, (int64_t)(da_old - da_new), false);
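/*
 * Worked example of the realtime branch above (added annotation, not in
 * the original): with sb_rextsize = 4, deleting br_startblock = 40 and
 * br_blockcount = 8 frees realtime extent 10 of length 2 (both divided
 * by the rt extent size), while nblks = 2 * 4 = 8 filesystem blocks are
 * accounted against di_nblocks and the XFS_TRANS_DQ_RTBCOUNT quota
 * field.
 */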
5416 * Unmap (remove) blocks from a file.
5417 * If nexts is nonzero then the number of extents to remove is limited to
5418 * that value. If not all extents in the block range can be removed then the amount remaining is returned in *rlen.
5423 xfs_trans_t *tp, /* transaction pointer */
5424 struct xfs_inode *ip, /* incore inode */
5425 xfs_fileoff_t bno, /* starting offset to unmap */
5426 xfs_filblks_t *rlen, /* i/o: amount remaining */
5427 int flags, /* misc flags */
5428 xfs_extnum_t nexts, /* number of extents max */
5429 xfs_fsblock_t *firstblock, /* first allocated block
5430 controls a.g. for allocs */
5431 struct xfs_defer_ops *dfops) /* i/o: deferred updates */
5433 xfs_btree_cur_t *cur; /* bmap btree cursor */
5434 xfs_bmbt_irec_t del; /* extent being deleted */
5435 int error; /* error return value */
5436 xfs_extnum_t extno; /* extent number in list */
5437 xfs_bmbt_irec_t got; /* current extent record */
5438 xfs_ifork_t *ifp; /* inode fork pointer */
5439 int isrt; /* freeing in rt area */
5440 xfs_extnum_t lastx; /* last extent index used */
5441 int logflags; /* transaction logging flags */
5442 xfs_extlen_t mod; /* rt extent offset */
5443 xfs_mount_t *mp; /* mount structure */
5444 xfs_fileoff_t start; /* first file offset deleted */
5445 int tmp_logflags; /* partial logging flags */
5446 int wasdel; /* was a delayed alloc extent */
5447 int whichfork; /* data or attribute fork */
5449 xfs_filblks_t len = *rlen; /* length to unmap in file */
5451 trace_xfs_bunmap(ip, bno, len, flags, _RET_IP_);
5453 whichfork = xfs_bmapi_whichfork(flags);
5454 ASSERT(whichfork != XFS_COW_FORK);
5455 ifp = XFS_IFORK_PTR(ip, whichfork);
5457 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
5458 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE)) {
5459 XFS_ERROR_REPORT("xfs_bunmapi", XFS_ERRLEVEL_LOW,
5461 return -EFSCORRUPTED;
5464 if (XFS_FORCED_SHUTDOWN(mp))
5467 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
5471 if (!(ifp->if_flags & XFS_IFEXTENTS) &&
5472 (error = xfs_iread_extents(tp, ip, whichfork)))
5474 if (xfs_iext_count(ifp) == 0) {
5478 XFS_STATS_INC(mp, xs_blk_unmap);
5479 isrt = (whichfork == XFS_DATA_FORK) && XFS_IS_REALTIME_INODE(ip);
5481 bno = start + len - 1;
5484 * Check to see if the given block number is past the end of the
5485 * file, back up to the last block if so...
5487 if (!xfs_iext_lookup_extent(ip, ifp, bno, &lastx, &got)) {
5489 xfs_iext_get_extent(ifp, --lastx, &got);
5490 bno = got.br_startoff + got.br_blockcount - 1;
5494 if (ifp->if_flags & XFS_IFBROOT) {
5495 ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE);
5496 cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
5497 cur->bc_private.b.firstblock = *firstblock;
5498 cur->bc_private.b.dfops = dfops;
5499 cur->bc_private.b.flags = 0;
5505 * Synchronize by locking the bitmap inode.
5507 xfs_ilock(mp->m_rbmip, XFS_ILOCK_EXCL|XFS_ILOCK_RTBITMAP);
5508 xfs_trans_ijoin(tp, mp->m_rbmip, XFS_ILOCK_EXCL);
5509 xfs_ilock(mp->m_rsumip, XFS_ILOCK_EXCL|XFS_ILOCK_RTSUM);
5510 xfs_trans_ijoin(tp, mp->m_rsumip, XFS_ILOCK_EXCL);
5514 while (bno != (xfs_fileoff_t)-1 && bno >= start && lastx >= 0 &&
5515 (nexts == 0 || extno < nexts)) {
5517 * Is the found extent after a hole in which bno lives?
5518 * Just back up to the previous extent, if so.
5520 if (got.br_startoff > bno) {
5523 xfs_iext_get_extent(ifp, lastx, &got);
5526 * Is the last block of this extent before the range
5527 * we're supposed to delete? If so, we're done.
5529 bno = XFS_FILEOFF_MIN(bno,
5530 got.br_startoff + got.br_blockcount - 1);
5534 * Then deal with the (possibly delayed) allocated space
5538 wasdel = isnullstartblock(del.br_startblock);
5539 if (got.br_startoff < start) {
5540 del.br_startoff = start;
5541 del.br_blockcount -= start - got.br_startoff;
5543 del.br_startblock += start - got.br_startoff;
5545 if (del.br_startoff + del.br_blockcount > bno + 1)
5546 del.br_blockcount = bno + 1 - del.br_startoff;
5547 sum = del.br_startblock + del.br_blockcount;
5549 (mod = do_mod(sum, mp->m_sb.sb_rextsize))) {
5551 * Realtime extent not lined up at the end.
5552 * The extent could have been split into written
5553 * and unwritten pieces, or we could just be
5554 * unmapping part of it. But we can't really
5555 * get rid of part of a realtime extent.
5557 if (del.br_state == XFS_EXT_UNWRITTEN ||
5558 !xfs_sb_version_hasextflgbit(&mp->m_sb)) {
5560 * This piece is unwritten, or we're not
5561 * using unwritten extents. Skip over it.
5564 bno -= mod > del.br_blockcount ?
5565 del.br_blockcount : mod;
5566 if (bno < got.br_startoff) {
5568 xfs_bmbt_get_all(xfs_iext_get_ext(
5574 * It's written, turn it unwritten.
5575 * This is better than zeroing it.
5577 ASSERT(del.br_state == XFS_EXT_NORM);
5578 ASSERT(tp->t_blk_res > 0);
5580 * If this spans a realtime extent boundary,
5581 * chop it back to the start of the one we end at.
5583 if (del.br_blockcount > mod) {
5584 del.br_startoff += del.br_blockcount - mod;
5585 del.br_startblock += del.br_blockcount - mod;
5586 del.br_blockcount = mod;
5588 del.br_state = XFS_EXT_UNWRITTEN;
5589 error = xfs_bmap_add_extent_unwritten_real(tp, ip,
5590 whichfork, &lastx, &cur, &del,
5591 firstblock, dfops, &logflags);
5596 if (isrt && (mod = do_mod(del.br_startblock, mp->m_sb.sb_rextsize))) {
5598 * Realtime extent is lined up at the end but not
5599 * at the front. We'll get rid of full extents if we can.
5602 mod = mp->m_sb.sb_rextsize - mod;
5603 if (del.br_blockcount > mod) {
5604 del.br_blockcount -= mod;
5605 del.br_startoff += mod;
5606 del.br_startblock += mod;
5607 } else if ((del.br_startoff == start &&
5608 (del.br_state == XFS_EXT_UNWRITTEN ||
5609 tp->t_blk_res == 0)) ||
5610 !xfs_sb_version_hasextflgbit(&mp->m_sb)) {
5612 * Can't make it unwritten. There isn't
5613 * a full extent here so just skip it.
5615 ASSERT(bno >= del.br_blockcount);
5616 bno -= del.br_blockcount;
5617 if (got.br_startoff > bno && --lastx >= 0)
5618 xfs_iext_get_extent(ifp, lastx, &got);
5620 } else if (del.br_state == XFS_EXT_UNWRITTEN) {
5621 struct xfs_bmbt_irec prev;
5624 * This one is already unwritten.
5625 * It must have a written left neighbor.
5626 * Unwrite the killed part of that one and try again.
5630 xfs_iext_get_extent(ifp, lastx - 1, &prev);
5631 ASSERT(prev.br_state == XFS_EXT_NORM);
5632 ASSERT(!isnullstartblock(prev.br_startblock));
5633 ASSERT(del.br_startblock ==
5634 prev.br_startblock + prev.br_blockcount);
5635 if (prev.br_startoff < start) {
5636 mod = start - prev.br_startoff;
5637 prev.br_blockcount -= mod;
5638 prev.br_startblock += mod;
5639 prev.br_startoff = start;
5641 prev.br_state = XFS_EXT_UNWRITTEN;
5643 error = xfs_bmap_add_extent_unwritten_real(tp,
5644 ip, whichfork, &lastx, &cur,
5645 &prev, firstblock, dfops,
5651 ASSERT(del.br_state == XFS_EXT_NORM);
5652 del.br_state = XFS_EXT_UNWRITTEN;
5653 error = xfs_bmap_add_extent_unwritten_real(tp,
5654 ip, whichfork, &lastx, &cur,
5655 &del, firstblock, dfops,
5664 * If it's the case where the directory code is running
5665 * with no block reservation, and the deleted block is in
5666 * the middle of its extent, and the resulting insert
5667 * of an extent would cause transformation to btree format,
5668 * then reject it. The calling code will then swap
5669 * blocks around instead.
5670 * We have to do this now, rather than waiting for the
5671 * conversion to btree format, since the transaction will be dirty.
5674 if (!wasdel && tp->t_blk_res == 0 &&
5675 XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS &&
5676 XFS_IFORK_NEXTENTS(ip, whichfork) >= /* Note the >= */
5677 XFS_IFORK_MAXEXT(ip, whichfork) &&
5678 del.br_startoff > got.br_startoff &&
5679 del.br_startoff + del.br_blockcount <
5680 got.br_startoff + got.br_blockcount) {
5686 * Unreserve quota and update realtime free space, if
5687 * appropriate. If delayed allocation, update the inode delalloc
5688 * counter now and wait to update the sb counters as
5689 * xfs_bmap_del_extent() might need to borrow some blocks.
5692 ASSERT(startblockval(del.br_startblock) > 0);
5694 xfs_filblks_t rtexts;
5696 rtexts = XFS_FSB_TO_B(mp, del.br_blockcount);
5697 do_div(rtexts, mp->m_sb.sb_rextsize);
5698 xfs_mod_frextents(mp, (int64_t)rtexts);
5699 (void)xfs_trans_reserve_quota_nblks(NULL,
5700 ip, -((long)del.br_blockcount), 0,
5701 XFS_QMOPT_RES_RTBLKS);
5703 (void)xfs_trans_reserve_quota_nblks(NULL,
5704 ip, -((long)del.br_blockcount), 0,
5705 XFS_QMOPT_RES_REGBLKS);
5707 ip->i_delayed_blks -= del.br_blockcount;
5709 cur->bc_private.b.flags |=
5710 XFS_BTCUR_BPRV_WASDEL;
5712 cur->bc_private.b.flags &= ~XFS_BTCUR_BPRV_WASDEL;
5714 error = xfs_bmap_del_extent(ip, tp, &lastx, dfops, cur, &del,
5715 &tmp_logflags, whichfork, flags);
5716 logflags |= tmp_logflags;
5720 if (!isrt && wasdel)
5721 xfs_mod_fdblocks(mp, (int64_t)del.br_blockcount, false);
5723 bno = del.br_startoff - 1;
5726 * If not done go on to the next (previous) record.
5728 if (bno != (xfs_fileoff_t)-1 && bno >= start) {
5730 xfs_iext_get_extent(ifp, lastx, &got);
5731 if (got.br_startoff > bno && --lastx >= 0)
5732 xfs_iext_get_extent(ifp, lastx, &got);
5737 if (bno == (xfs_fileoff_t)-1 || bno < start || lastx < 0)
5740 *rlen = bno - start + 1;
5743 * Convert to a btree if necessary.
5745 if (xfs_bmap_needs_btree(ip, whichfork)) {
5746 ASSERT(cur == NULL);
5747 error = xfs_bmap_extents_to_btree(tp, ip, firstblock, dfops,
5748 &cur, 0, &tmp_logflags, whichfork);
5749 logflags |= tmp_logflags;
5754 * transform from btree to extents, give it cur
5756 else if (xfs_bmap_wants_extents(ip, whichfork)) {
5757 ASSERT(cur != NULL);
5758 error = xfs_bmap_btree_to_extents(tp, ip, cur, &tmp_logflags,
5760 logflags |= tmp_logflags;
5765 * transform from extents to local?
5770 * Log everything. Do this after conversion, there's no point in
5771 * logging the extent records if we've converted to btree format.
5773 if ((logflags & xfs_ilog_fext(whichfork)) &&
5774 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS)
5775 logflags &= ~xfs_ilog_fext(whichfork);
5776 else if ((logflags & xfs_ilog_fbroot(whichfork)) &&
5777 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE)
5778 logflags &= ~xfs_ilog_fbroot(whichfork);
5780 * Log the inode even in the error case; if the transaction
5781 * is dirty we'll need to shut down the filesystem.
5784 xfs_trans_log_inode(tp, ip, logflags);
5787 *firstblock = cur->bc_private.b.firstblock;
5788 cur->bc_private.b.allocated = 0;
5790 xfs_btree_del_cursor(cur,
5791 error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
5796 /* Unmap a range of a file. */
5800 struct xfs_inode *ip,
5805 xfs_fsblock_t *firstblock,
5806 struct xfs_defer_ops *dfops,
5811 error = __xfs_bunmapi(tp, ip, bno, &len, flags, nexts, firstblock,
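/*
 * Illustrative sketch, not part of the original source: the usual way
 * to drive xfs_bunmapi() is a loop that keeps unmapping until the whole
 * range is gone, finishing the deferred work each time around, much as
 * the truncate path does. Transaction allocation, rolling and the final
 * commit are assumed to be handled by the (hypothetical) caller.
 */
static inline int
xfs_bunmapi_loop_example(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	xfs_fileoff_t		start_fsb,
	xfs_filblks_t		len_fsb)
{
	struct xfs_defer_ops	dfops;
	xfs_fsblock_t		firstblock;
	int			done = 0;
	int			error = 0;

	while (!done) {
		xfs_defer_init(&dfops, &firstblock);
		/* unmap at most two extents per transaction cycle */
		error = xfs_bunmapi(tp, ip, start_fsb, len_fsb, 0, 2,
				&firstblock, &dfops, &done);
		if (error)
			break;
		error = xfs_defer_finish(&tp, &dfops, ip);
		if (error)
			break;
	}
	if (error)
		xfs_defer_cancel(&dfops);
	return error;
}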
5818 * Determine whether an extent shift can be accomplished by a merge with the
5819 * extent that precedes the target hole of the shift.
5823 struct xfs_bmbt_irec *left, /* preceding extent */
5824 struct xfs_bmbt_irec *got, /* current extent to shift */
5825 xfs_fileoff_t shift) /* shift fsb */
5827 xfs_fileoff_t startoff;
5829 startoff = got->br_startoff - shift;
5832 * The extent, once shifted, must be adjacent in-file and on-disk with
5833 * the preceding extent.
5835 if ((left->br_startoff + left->br_blockcount != startoff) ||
5836 (left->br_startblock + left->br_blockcount != got->br_startblock) ||
5837 (left->br_state != got->br_state) ||
5838 (left->br_blockcount + got->br_blockcount > MAXEXTLEN))
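/*
 * Worked example (added annotation, not in the original): with
 * left = [startoff 0, 10 blocks at fsb 100] and got = [startoff 15,
 * 5 blocks at fsb 110], a shift of 5 moves got to startoff 10. That is
 * contiguous in-file (0 + 10 == 10) and on-disk (100 + 10 == 110), so
 * if the states match and the combined length fits in MAXEXTLEN the two
 * extents can simply be merged instead of shifted.
 */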
5845 * A bmap extent shift adjusts the file offset of an extent to fill a preceding
5846 * hole in the file. If an extent shift would result in the extent being fully
5847 * adjacent to the extent that currently precedes the hole, we can merge with
5848 * the preceding extent rather than do the shift.
5850 * This function assumes the caller has verified a shift-by-merge is possible
5851 * with the provided extents via xfs_bmse_can_merge().
5855 struct xfs_inode *ip,
5857 xfs_fileoff_t shift, /* shift fsb */
5858 int current_ext, /* idx of gotp */
5859 struct xfs_bmbt_rec_host *gotp, /* extent to shift */
5860 struct xfs_bmbt_rec_host *leftp, /* preceding extent */
5861 struct xfs_btree_cur *cur,
5862 int *logflags) /* output */
5864 struct xfs_bmbt_irec got;
5865 struct xfs_bmbt_irec left;
5866 xfs_filblks_t blockcount;
5868 struct xfs_mount *mp = ip->i_mount;
5870 xfs_bmbt_get_all(gotp, &got);
5871 xfs_bmbt_get_all(leftp, &left);
5872 blockcount = left.br_blockcount + got.br_blockcount;
5874 ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
5875 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
5876 ASSERT(xfs_bmse_can_merge(&left, &got, shift));
5879 * Merge the in-core extents. Note that the host record pointers and
5880 * current_ext index are invalid once the extent has been removed via
5881 * xfs_iext_remove().
5883 xfs_bmbt_set_blockcount(leftp, blockcount);
5884 xfs_iext_remove(ip, current_ext, 1, 0);
5887 * Update the on-disk extent count, the btree if necessary, and log the inode.
5890 XFS_IFORK_NEXT_SET(ip, whichfork,
5891 XFS_IFORK_NEXTENTS(ip, whichfork) - 1);
5892 *logflags |= XFS_ILOG_CORE;
5894 *logflags |= XFS_ILOG_DEXT;
5898 /* lookup and remove the extent to merge */
5899 error = xfs_bmbt_lookup_eq(cur, got.br_startoff, got.br_startblock,
5900 got.br_blockcount, &i);
5903 XFS_WANT_CORRUPTED_RETURN(mp, i == 1);
5905 error = xfs_btree_delete(cur, &i);
5908 XFS_WANT_CORRUPTED_RETURN(mp, i == 1);
5910 /* lookup and update size of the previous extent */
5911 error = xfs_bmbt_lookup_eq(cur, left.br_startoff, left.br_startblock,
5912 left.br_blockcount, &i);
5915 XFS_WANT_CORRUPTED_RETURN(mp, i == 1);
5917 left.br_blockcount = blockcount;
5919 return xfs_bmbt_update(cur, left.br_startoff, left.br_startblock,
5920 left.br_blockcount, left.br_state);
5924 * Shift a single extent.
5928 struct xfs_inode *ip,
5930 xfs_fileoff_t offset_shift_fsb,
5932 struct xfs_bmbt_rec_host *gotp,
5933 struct xfs_btree_cur *cur,
5935 enum shift_direction direction,
5936 struct xfs_defer_ops *dfops)
5938 struct xfs_ifork *ifp;
5939 struct xfs_mount *mp;
5940 xfs_fileoff_t startoff;
5941 struct xfs_bmbt_rec_host *adj_irecp;
5942 struct xfs_bmbt_irec got;
5943 struct xfs_bmbt_irec adj_irec;
5949 ifp = XFS_IFORK_PTR(ip, whichfork);
5950 total_extents = xfs_iext_count(ifp);
5952 xfs_bmbt_get_all(gotp, &got);
5954 /* delalloc extents should be prevented by caller */
5955 XFS_WANT_CORRUPTED_RETURN(mp, !isnullstartblock(got.br_startblock));
5957 if (direction == SHIFT_LEFT) {
5958 startoff = got.br_startoff - offset_shift_fsb;
5961 * Check for merge if we've got an extent to the left,
5962 * otherwise make sure there's enough room at the start
5963 * of the file for the shift.
5965 if (!*current_ext) {
5966 if (got.br_startoff < offset_shift_fsb)
5968 goto update_current_ext;
5971 * grab the left extent and check for a large enough hole.
5974 adj_irecp = xfs_iext_get_ext(ifp, *current_ext - 1);
5975 xfs_bmbt_get_all(adj_irecp, &adj_irec);
5978 adj_irec.br_startoff + adj_irec.br_blockcount)
5981 /* check whether to merge the extent or shift it down */
5982 if (xfs_bmse_can_merge(&adj_irec, &got,
5983 offset_shift_fsb)) {
5984 error = xfs_bmse_merge(ip, whichfork, offset_shift_fsb,
5985 *current_ext, gotp, adj_irecp,
5993 startoff = got.br_startoff + offset_shift_fsb;
5994 /* nothing to move if this is the last extent */
5995 if (*current_ext >= (total_extents - 1))
5996 goto update_current_ext;
5998 * If this is not the last extent in the file, make sure there
5999 * is enough room between the current extent and the next one
6000 * to accommodate the shift.
6002 adj_irecp = xfs_iext_get_ext(ifp, *current_ext + 1);
6003 xfs_bmbt_get_all(adj_irecp, &adj_irec);
6004 if (startoff + got.br_blockcount > adj_irec.br_startoff)
6007 * Unlike a left shift (which involves a hole punch),
6008 * a right shift does not modify extent neighbors
6009 * in any way. We should never find mergeable extents
6010 * in this scenario. Check anyway and warn if we
6011 * encounter two extents that could be one.
6013 if (xfs_bmse_can_merge(&got, &adj_irec, offset_shift_fsb))
6017 * Increment the extent index for the next iteration, update the start
6018 * offset of the in-core extent and update the btree if applicable.
6021 if (direction == SHIFT_LEFT)
6025 xfs_bmbt_set_startoff(gotp, startoff);
6026 *logflags |= XFS_ILOG_CORE;
6029 *logflags |= XFS_ILOG_DEXT;
6033 error = xfs_bmbt_lookup_eq(cur, got.br_startoff, got.br_startblock,
6034 got.br_blockcount, &i);
6037 XFS_WANT_CORRUPTED_RETURN(mp, i == 1);
6039 got.br_startoff = startoff;
6040 error = xfs_bmbt_update(cur, got.br_startoff, got.br_startblock,
6041 got.br_blockcount, got.br_state);
6046 /* update reverse mapping */
6047 error = xfs_rmap_unmap_extent(mp, dfops, ip, whichfork, &adj_irec);
6050 adj_irec.br_startoff = startoff;
6051 return xfs_rmap_map_extent(mp, dfops, ip, whichfork, &adj_irec);
6055 * Shift extent records to the left/right to cover/create a hole.
6057 * The maximum number of extents to be shifted in a single operation is
6058 * @num_exts. @stop_fsb specifies the file offset at which to stop shift and the
6059 * file offset where we've left off is returned in @next_fsb. @offset_shift_fsb
6060 * is the length by which each extent is shifted. If there is no hole to shift
6061 * the extents into, this will be considered an invalid operation and we abort immediately.
6065 xfs_bmap_shift_extents(
6066 struct xfs_trans *tp,
6067 struct xfs_inode *ip,
6068 xfs_fileoff_t *next_fsb,
6069 xfs_fileoff_t offset_shift_fsb,
6071 xfs_fileoff_t stop_fsb,
6072 xfs_fsblock_t *firstblock,
6073 struct xfs_defer_ops *dfops,
6074 enum shift_direction direction,
6077 struct xfs_btree_cur *cur = NULL;
6078 struct xfs_bmbt_rec_host *gotp;
6079 struct xfs_bmbt_irec got;
6080 struct xfs_mount *mp = ip->i_mount;
6081 struct xfs_ifork *ifp;
6082 xfs_extnum_t nexts = 0;
6083 xfs_extnum_t current_ext;
6084 xfs_extnum_t total_extents;
6085 xfs_extnum_t stop_extent;
6087 int whichfork = XFS_DATA_FORK;
6090 if (unlikely(XFS_TEST_ERROR(
6091 (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
6092 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE),
6093 mp, XFS_ERRTAG_BMAPIFORMAT, XFS_RANDOM_BMAPIFORMAT))) {
6094 XFS_ERROR_REPORT("xfs_bmap_shift_extents",
6095 XFS_ERRLEVEL_LOW, mp);
6096 return -EFSCORRUPTED;
6099 if (XFS_FORCED_SHUTDOWN(mp))
6102 ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
6103 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
6104 ASSERT(direction == SHIFT_LEFT || direction == SHIFT_RIGHT);
6105 ASSERT(*next_fsb != NULLFSBLOCK || direction == SHIFT_RIGHT);
6107 ifp = XFS_IFORK_PTR(ip, whichfork);
6108 if (!(ifp->if_flags & XFS_IFEXTENTS)) {
6109 /* Read in all the extents */
6110 error = xfs_iread_extents(tp, ip, whichfork);
6115 if (ifp->if_flags & XFS_IFBROOT) {
6116 cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
6117 cur->bc_private.b.firstblock = *firstblock;
6118 cur->bc_private.b.dfops = dfops;
6119 cur->bc_private.b.flags = 0;
6123 * There may be delalloc extents in the data fork before the range we
6124 * are collapsing out, so we cannot use the count of real extents here.
6125 * Instead we have to calculate it from the incore fork.
6127 total_extents = xfs_iext_count(ifp);
6128 if (total_extents == 0) {
6134 * For the first right shift, we need to initialize next_fsb
6136 if (*next_fsb == NULLFSBLOCK) {
6137 gotp = xfs_iext_get_ext(ifp, total_extents - 1);
6138 xfs_bmbt_get_all(gotp, &got);
6139 *next_fsb = got.br_startoff;
6140 if (stop_fsb > *next_fsb) {
6146 /* Lookup the extent index at which we have to stop */
6147 if (direction == SHIFT_RIGHT) {
6148 gotp = xfs_iext_bno_to_ext(ifp, stop_fsb, &stop_extent);
6149 /* Make stop_extent exclusive of shift range */
6152 stop_extent = total_extents;
6155 * Look up the extent index for the fsb where we start shifting. We can
6156 * henceforth iterate with current_ext as extent list changes are locked out via ilock.
6159 * gotp can be null in 2 cases: 1) if there are no extents or 2)
6160 * *next_fsb lies in a hole beyond which there are no extents. Either
6163 gotp = xfs_iext_bno_to_ext(ifp, *next_fsb, &current_ext);
6169 /* some sanity checking before we finally start shifting extents */
6170 if ((direction == SHIFT_LEFT && current_ext >= stop_extent) ||
6171 (direction == SHIFT_RIGHT && current_ext <= stop_extent)) {
6176 while (nexts++ < num_exts) {
6177 error = xfs_bmse_shift_one(ip, whichfork, offset_shift_fsb,
6178 &current_ext, gotp, cur, &logflags,
6183 * If there was an extent merge during the shift, the extent
6184 * count can change. Update the total and grab the next record.
6186 if (direction == SHIFT_LEFT) {
6187 total_extents = xfs_iext_count(ifp);
6188 stop_extent = total_extents;
6191 if (current_ext == stop_extent) {
6193 *next_fsb = NULLFSBLOCK;
6196 gotp = xfs_iext_get_ext(ifp, current_ext);
6200 xfs_bmbt_get_all(gotp, &got);
6201 *next_fsb = got.br_startoff;
6206 xfs_btree_del_cursor(cur,
6207 error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
6210 xfs_trans_log_inode(tp, ip, logflags);
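/*
 * Illustrative usage sketch (assumed caller shape, not from this file):
 * a collapse-range style caller repeatedly shifts left in its own
 * transactions until *next_fsb comes back as NULLFSBLOCK. Transaction
 * setup, xfs_defer_finish() and commit happen each time around; the
 * elided arguments follow the declaration above.
 *
 *	next_fsb = start_fsb;
 *	do {
 *		error = xfs_bmap_shift_extents(tp, ip, &next_fsb,
 *				shift_fsb, ..., stop_fsb, &firstblock,
 *				&dfops, SHIFT_LEFT, ...);
 *	} while (!error && next_fsb != NULLFSBLOCK);
 */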
6216 * Splits an extent into two extents at the block split_fsb such that it is
6217 * the first block of the current_ext. @current_ext is the target extent
6218 * to be split. @split_fsb is the block where the extent is split.
6219 * If split_fsb lies in a hole or at the first block of an extent, just return 0.
6222 xfs_bmap_split_extent_at(
6223 struct xfs_trans *tp,
6224 struct xfs_inode *ip,
6225 xfs_fileoff_t split_fsb,
6226 xfs_fsblock_t *firstfsb,
6227 struct xfs_defer_ops *dfops)
6229 int whichfork = XFS_DATA_FORK;
6230 struct xfs_btree_cur *cur = NULL;
6231 struct xfs_bmbt_rec_host *gotp;
6232 struct xfs_bmbt_irec got;
6233 struct xfs_bmbt_irec new; /* split extent */
6234 struct xfs_mount *mp = ip->i_mount;
6235 struct xfs_ifork *ifp;
6236 xfs_fsblock_t gotblkcnt; /* new block count for got */
6237 xfs_extnum_t current_ext;
6242 if (unlikely(XFS_TEST_ERROR(
6243 (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
6244 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE),
6245 mp, XFS_ERRTAG_BMAPIFORMAT, XFS_RANDOM_BMAPIFORMAT))) {
6246 XFS_ERROR_REPORT("xfs_bmap_split_extent_at",
6247 XFS_ERRLEVEL_LOW, mp);
6248 return -EFSCORRUPTED;
6251 if (XFS_FORCED_SHUTDOWN(mp))
6254 ifp = XFS_IFORK_PTR(ip, whichfork);
6255 if (!(ifp->if_flags & XFS_IFEXTENTS)) {
6256 /* Read in all the extents */
6257 error = xfs_iread_extents(tp, ip, whichfork);
6263 * gotp can be null in 2 cases: 1) if there are no extents
6264 * or 2) split_fsb lies in a hole beyond which there are
6265 * no extents. Either way, we are done.
6267 gotp = xfs_iext_bno_to_ext(ifp, split_fsb, &current_ext);
6271 xfs_bmbt_get_all(gotp, &got);
6274 * Check whether split_fsb lies in a hole or at the start boundary offset of the extent.
6277 if (got.br_startoff >= split_fsb)
6280 gotblkcnt = split_fsb - got.br_startoff;
6281 new.br_startoff = split_fsb;
6282 new.br_startblock = got.br_startblock + gotblkcnt;
6283 new.br_blockcount = got.br_blockcount - gotblkcnt;
6284 new.br_state = got.br_state;
6286 if (ifp->if_flags & XFS_IFBROOT) {
6287 cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
6288 cur->bc_private.b.firstblock = *firstfsb;
6289 cur->bc_private.b.dfops = dfops;
6290 cur->bc_private.b.flags = 0;
6291 error = xfs_bmbt_lookup_eq(cur, got.br_startoff,
6297 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, del_cursor);
6300 xfs_bmbt_set_blockcount(gotp, gotblkcnt);
6301 got.br_blockcount = gotblkcnt;
6303 logflags = XFS_ILOG_CORE;
6305 error = xfs_bmbt_update(cur, got.br_startoff,
6312 logflags |= XFS_ILOG_DEXT;
6314 /* Add new extent */
6316 xfs_iext_insert(ip, current_ext, 1, &new, 0);
6317 XFS_IFORK_NEXT_SET(ip, whichfork,
6318 XFS_IFORK_NEXTENTS(ip, whichfork) + 1);
6321 error = xfs_bmbt_lookup_eq(cur, new.br_startoff,
6322 new.br_startblock, new.br_blockcount,
6326 XFS_WANT_CORRUPTED_GOTO(mp, i == 0, del_cursor);
6327 cur->bc_rec.b.br_state = new.br_state;
6329 error = xfs_btree_insert(cur, &i);
6332 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, del_cursor);
6336 * Convert to a btree if necessary.
6338 if (xfs_bmap_needs_btree(ip, whichfork)) {
6339 int tmp_logflags; /* partial log flag return val */
6341 ASSERT(cur == NULL);
6342 error = xfs_bmap_extents_to_btree(tp, ip, firstfsb, dfops,
6343 &cur, 0, &tmp_logflags, whichfork);
6344 logflags |= tmp_logflags;
6349 cur->bc_private.b.allocated = 0;
6350 xfs_btree_del_cursor(cur,
6351 error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
6355 xfs_trans_log_inode(tp, ip, logflags);
6360 xfs_bmap_split_extent(
6361 struct xfs_inode *ip,
6362 xfs_fileoff_t split_fsb)
6364 struct xfs_mount *mp = ip->i_mount;
6365 struct xfs_trans *tp;
6366 struct xfs_defer_ops dfops;
6367 xfs_fsblock_t firstfsb;
6370 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write,
6371 XFS_DIOSTRAT_SPACE_RES(mp, 0), 0, 0, &tp);
6375 xfs_ilock(ip, XFS_ILOCK_EXCL);
6376 xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
6378 xfs_defer_init(&dfops, &firstfsb);
6380 error = xfs_bmap_split_extent_at(tp, ip, split_fsb,
6385 error = xfs_defer_finish(&tp, &dfops, NULL);
6389 return xfs_trans_commit(tp);
6392 xfs_defer_cancel(&dfops);
6393 xfs_trans_cancel(tp);
6397 /* Deferred mapping is only for real extents in the data fork. */
6399 xfs_bmap_is_update_needed(
6400 struct xfs_bmbt_irec *bmap)
6402 return bmap->br_startblock != HOLESTARTBLOCK &&
6403 bmap->br_startblock != DELAYSTARTBLOCK;
6406 /* Record a bmap intent. */
6409 struct xfs_mount *mp,
6410 struct xfs_defer_ops *dfops,
6411 enum xfs_bmap_intent_type type,
6412 struct xfs_inode *ip,
6414 struct xfs_bmbt_irec *bmap)
6417 struct xfs_bmap_intent *bi;
6419 trace_xfs_bmap_defer(mp,
6420 XFS_FSB_TO_AGNO(mp, bmap->br_startblock),
6422 XFS_FSB_TO_AGBNO(mp, bmap->br_startblock),
6423 ip->i_ino, whichfork,
6425 bmap->br_blockcount,
6428 bi = kmem_alloc(sizeof(struct xfs_bmap_intent), KM_SLEEP | KM_NOFS);
6429 INIT_LIST_HEAD(&bi->bi_list);
6432 bi->bi_whichfork = whichfork;
6433 bi->bi_bmap = *bmap;
6435 error = xfs_defer_join(dfops, bi->bi_owner);
6441 xfs_defer_add(dfops, XFS_DEFER_OPS_TYPE_BMAP, &bi->bi_list);
6445 /* Map an extent into a file. */
6447 xfs_bmap_map_extent(
6448 struct xfs_mount *mp,
6449 struct xfs_defer_ops *dfops,
6450 struct xfs_inode *ip,
6451 struct xfs_bmbt_irec *PREV)
6453 if (!xfs_bmap_is_update_needed(PREV))
6456 return __xfs_bmap_add(mp, dfops, XFS_BMAP_MAP, ip,
6457 XFS_DATA_FORK, PREV);
6460 /* Unmap an extent out of a file. */
6462 xfs_bmap_unmap_extent(
6463 struct xfs_mount *mp,
6464 struct xfs_defer_ops *dfops,
6465 struct xfs_inode *ip,
6466 struct xfs_bmbt_irec *PREV)
6468 if (!xfs_bmap_is_update_needed(PREV))
6471 return __xfs_bmap_add(mp, dfops, XFS_BMAP_UNMAP, ip,
6472 XFS_DATA_FORK, PREV);
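/*
 * Illustrative sketch, not part of the original source: the reflink
 * remap path pairs these helpers to move a mapping between two inodes
 * through deferred bmap intents. The irec describes a real data fork
 * extent; dfops setup and xfs_defer_finish() are assumed to be handled
 * by the (hypothetical) caller.
 */
static inline int
xfs_bmap_remap_example(
	struct xfs_mount	*mp,
	struct xfs_defer_ops	*dfops,
	struct xfs_inode	*src_ip,
	struct xfs_inode	*dest_ip,
	struct xfs_bmbt_irec	*irec)
{
	int			error;

	/* queue an intent to remove the mapping from the source file */
	error = xfs_bmap_unmap_extent(mp, dfops, src_ip, irec);
	if (error)
		return error;

	/* queue an intent to add the same mapping to the destination */
	return xfs_bmap_map_extent(mp, dfops, dest_ip, irec);
}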
6476 * Process one of the deferred bmap operations. We pass back the
6477 * btree cursor to maintain our lock on the bmapbt between calls.
6480 xfs_bmap_finish_one(
6481 struct xfs_trans *tp,
6482 struct xfs_defer_ops *dfops,
6483 struct xfs_inode *ip,
6484 enum xfs_bmap_intent_type type,
6486 xfs_fileoff_t startoff,
6487 xfs_fsblock_t startblock,
6488 xfs_filblks_t blockcount,
6491 struct xfs_bmbt_irec bmap;
6493 xfs_fsblock_t firstfsb;
6494 int flags = XFS_BMAPI_REMAP;
6498 bmap.br_startblock = startblock;
6499 bmap.br_startoff = startoff;
6500 bmap.br_blockcount = blockcount;
6501 bmap.br_state = state;
6503 trace_xfs_bmap_deferred(tp->t_mountp,
6504 XFS_FSB_TO_AGNO(tp->t_mountp, startblock), type,
6505 XFS_FSB_TO_AGBNO(tp->t_mountp, startblock),
6506 ip->i_ino, whichfork, startoff, blockcount, state);
6508 if (whichfork != XFS_DATA_FORK && whichfork != XFS_ATTR_FORK)
6509 return -EFSCORRUPTED;
6510 if (whichfork == XFS_ATTR_FORK)
6511 flags |= XFS_BMAPI_ATTRFORK;
6513 if (XFS_TEST_ERROR(false, tp->t_mountp,
6514 XFS_ERRTAG_BMAP_FINISH_ONE,
6515 XFS_RANDOM_BMAP_FINISH_ONE))
6520 firstfsb = bmap.br_startblock;
6521 error = xfs_bmapi_write(tp, ip, bmap.br_startoff,
6522 bmap.br_blockcount, flags, &firstfsb,
6523 bmap.br_blockcount, &bmap, &nimaps,
6526 case XFS_BMAP_UNMAP:
6527 error = xfs_bunmapi(tp, ip, bmap.br_startoff,
6528 bmap.br_blockcount, flags, 1, &firstfsb,
6534 error = -EFSCORRUPTED;