2 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License as
7 * published by the Free Software Foundation.
9 * This program is distributed in the hope that it would be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write the Free Software Foundation,
16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
18 #include <linux/log2.h>
22 #include "xfs_types.h"
27 #include "xfs_trans.h"
28 #include "xfs_trans_priv.h"
32 #include "xfs_dmapi.h"
33 #include "xfs_mount.h"
34 #include "xfs_bmap_btree.h"
35 #include "xfs_alloc_btree.h"
36 #include "xfs_ialloc_btree.h"
37 #include "xfs_dir2_sf.h"
38 #include "xfs_attr_sf.h"
39 #include "xfs_dinode.h"
40 #include "xfs_inode.h"
41 #include "xfs_buf_item.h"
42 #include "xfs_inode_item.h"
43 #include "xfs_btree.h"
44 #include "xfs_alloc.h"
45 #include "xfs_ialloc.h"
48 #include "xfs_error.h"
49 #include "xfs_utils.h"
50 #include "xfs_dir2_trace.h"
51 #include "xfs_quota.h"
53 #include "xfs_filestream.h"
54 #include "xfs_vnodeops.h"
56 kmem_zone_t *xfs_ifork_zone;
57 kmem_zone_t *xfs_inode_zone;
58 kmem_zone_t *xfs_icluster_zone;
61 * Used in xfs_itruncate(). This is the maximum number of extents
62 * freed from a file in a single transaction.
64 #define XFS_ITRUNC_MAX_EXTENTS 2
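/*
 * Sketch (for illustration only, not part of this file's logic):
 * xfs_itruncate_finish() below applies this limit by looping roughly as
 * follows, rolling the permanent transaction between passes:
 *
 *	do {
 *		error = xfs_bunmapi(ntp, ip, first_unmap_block, unmap_len,
 *				    XFS_BMAPI_AFLAG(fork),
 *				    XFS_ITRUNC_MAX_EXTENTS,
 *				    &first_block, &free_list, ...);
 *		error = xfs_bmap_finish(tp, &free_list, &committed);
 *		ntp = xfs_trans_dup(ntp);
 *		(void) xfs_trans_commit(*tp, 0);
 *		error = xfs_trans_reserve(ntp, 0, XFS_ITRUNCATE_LOG_RES(mp), 0,
 *					  XFS_TRANS_PERM_LOG_RES,
 *					  XFS_ITRUNCATE_LOG_COUNT);
 *	} while (the range is not yet fully unmapped);
 */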
66 STATIC int xfs_iflush_int(xfs_inode_t *, xfs_buf_t *);
67 STATIC int xfs_iformat_local(xfs_inode_t *, xfs_dinode_t *, int, int);
68 STATIC int xfs_iformat_extents(xfs_inode_t *, xfs_dinode_t *, int);
69 STATIC int xfs_iformat_btree(xfs_inode_t *, xfs_dinode_t *, int);
73 * Make sure that the extents in the given memory buffer are valid.
83 xfs_bmbt_rec_host_t rec;
86 for (i = 0; i < nrecs; i++) {
87 xfs_bmbt_rec_host_t *ep = xfs_iext_get_ext(ifp, i);
88 rec.l0 = get_unaligned(&ep->l0);
89 rec.l1 = get_unaligned(&ep->l1);
90 xfs_bmbt_get_all(&rec, &irec);
91 if (fmt == XFS_EXTFMT_NOSTATE)
92 ASSERT(irec.br_state == XFS_EXT_NORM);
96 #define xfs_validate_extents(ifp, nrecs, fmt)
100 * Check that none of the inodes in the buffer have a next
101 * unlinked field of 0.
113 j = mp->m_inode_cluster_size >> mp->m_sb.sb_inodelog;
115 for (i = 0; i < j; i++) {
116 dip = (xfs_dinode_t *)xfs_buf_offset(bp,
117 i * mp->m_sb.sb_inodesize);
118 if (!dip->di_next_unlinked) {
119 xfs_fs_cmn_err(CE_ALERT, mp,
120 "Detected a bogus zero next_unlinked field in incore inode buffer 0x%p. About to pop an ASSERT.",
122 ASSERT(dip->di_next_unlinked);
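/*
 * Worked example (illustrative assumption): with an 8192-byte inode cluster
 * and 256-byte inodes (sb_inodelog == 8, sb_inodesize == 256), the loop above
 * checks j == 8192 >> 8 == 32 inodes, the i-th of which starts at byte offset
 * i * 256 within the buffer.
 */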
129 * Find the buffer associated with the given inode map.
130 * We do basic validation checks on the buffer once it has been
131 * retrieved from disk.
147 error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, imap->im_blkno,
148 (int)imap->im_len, buf_flags, &bp);
150 if (error != EAGAIN) {
152 "xfs_imap_to_bp: xfs_trans_read_buf() returned "
153 "an error %d on %s. Returning error.",
154 error, mp->m_fsname);
156 ASSERT(buf_flags & XFS_BUF_TRYLOCK);
162 * Validate the magic number and version of every inode in the buffer
163 * (if DEBUG kernel) or the first inode in the buffer, otherwise.
166 ni = BBTOB(imap->im_len) >> mp->m_sb.sb_inodelog;
167 #else /* usual case */
171 for (i = 0; i < ni; i++) {
175 dip = (xfs_dinode_t *)xfs_buf_offset(bp,
176 (i << mp->m_sb.sb_inodelog));
177 di_ok = be16_to_cpu(dip->di_core.di_magic) == XFS_DINODE_MAGIC &&
178 XFS_DINODE_GOOD_VERSION(dip->di_core.di_version);
179 if (unlikely(XFS_TEST_ERROR(!di_ok, mp,
180 XFS_ERRTAG_ITOBP_INOTOBP,
181 XFS_RANDOM_ITOBP_INOTOBP))) {
182 if (imap_flags & XFS_IMAP_BULKSTAT) {
183 xfs_trans_brelse(tp, bp);
184 return XFS_ERROR(EINVAL);
186 XFS_CORRUPTION_ERROR("xfs_imap_to_bp",
187 XFS_ERRLEVEL_HIGH, mp, dip);
190 "Device %s - bad inode magic/vsn "
191 "daddr %lld #%d (magic=%x)",
192 XFS_BUFTARG_NAME(mp->m_ddev_targp),
193 (unsigned long long)imap->im_blkno, i,
194 be16_to_cpu(dip->di_core.di_magic));
196 xfs_trans_brelse(tp, bp);
197 return XFS_ERROR(EFSCORRUPTED);
201 xfs_inobp_check(mp, bp);
204 * Mark the buffer as an inode buffer now that it looks good
206 XFS_BUF_SET_VTYPE(bp, B_FS_INO);
213 * This routine is called to map an inode number within a file
214 * system to the buffer containing the on-disk version of the
215 * inode. It returns a pointer to the buffer containing the
216 * on-disk inode in the bpp parameter, and in the dip parameter
217 * it returns a pointer to the on-disk inode within that buffer.
219 * If a non-zero error is returned, then the contents of bpp and
220 * dipp are undefined.
222 * Use xfs_imap() to determine the size and location of the
223 * buffer to read from disk.
239 error = xfs_imap(mp, tp, ino, &imap, XFS_IMAP_LOOKUP);
243 error = xfs_imap_to_bp(mp, tp, &imap, &bp, XFS_BUF_LOCK, 0);
247 *dipp = (xfs_dinode_t *)xfs_buf_offset(bp, imap.im_boffset);
249 *offset = imap.im_boffset;
255 * This routine is called to map an inode to the buffer containing
256 * the on-disk version of the inode. It returns a pointer to the
257 * buffer containing the on-disk inode in the bpp parameter, and in
258 * the dip parameter it returns a pointer to the on-disk inode within
261 * If a non-zero error is returned, then the contents of bpp and
262 * dipp are undefined.
264 * If the inode is new and has not yet been initialized, use xfs_imap()
265 * to determine the size and location of the buffer to read from disk.
266 * If the inode has already been mapped to its buffer and read in once,
267 * then use the mapping information stored in the inode rather than
268 * calling xfs_imap(). This allows us to avoid the overhead of looking
269 * at the inode btree for small block file systems (see xfs_dilocate()).
270 * We can tell whether the inode has been mapped in before by comparing
271 * its disk block address to 0. Only uninitialized inodes will have
272 * 0 for the disk block address.
289 if (ip->i_blkno == (xfs_daddr_t)0) {
291 error = xfs_imap(mp, tp, ip->i_ino, &imap,
292 XFS_IMAP_LOOKUP | imap_flags);
297 * Fill in the fields in the inode that will be used to
298 * map the inode to its buffer from now on.
300 ip->i_blkno = imap.im_blkno;
301 ip->i_len = imap.im_len;
302 ip->i_boffset = imap.im_boffset;
305 * We've already mapped the inode once, so just use the
306 * mapping that we saved the first time.
308 imap.im_blkno = ip->i_blkno;
309 imap.im_len = ip->i_len;
310 imap.im_boffset = ip->i_boffset;
312 ASSERT(bno == 0 || bno == imap.im_blkno);
314 error = xfs_imap_to_bp(mp, tp, &imap, &bp, buf_flags, imap_flags);
319 ASSERT(buf_flags & XFS_BUF_TRYLOCK);
325 *dipp = (xfs_dinode_t *)xfs_buf_offset(bp, imap.im_boffset);
331 * Move inode type and inode format specific information from the
332 * on-disk inode to the in-core inode. For fifos, devs, and sockets
333 * this means setting if_rdev to the proper value. For files, directories,
334 * and symlinks it means bringing in the in-line data or extent
335 * pointers. For a file in B-tree format, only the root is immediately
336 * brought in-core. The rest will be in-lined in if_extents when it
337 * is first referenced (see xfs_iread_extents()).
344 xfs_attr_shortform_t *atp;
348 ip->i_df.if_ext_max =
349 XFS_IFORK_DSIZE(ip) / (uint)sizeof(xfs_bmbt_rec_t);
352 if (unlikely(be32_to_cpu(dip->di_core.di_nextents) +
353 be16_to_cpu(dip->di_core.di_anextents) >
354 be64_to_cpu(dip->di_core.di_nblocks))) {
355 xfs_fs_repair_cmn_err(CE_WARN, ip->i_mount,
356 "corrupt dinode %Lu, extent total = %d, nblocks = %Lu.",
357 (unsigned long long)ip->i_ino,
358 (int)(be32_to_cpu(dip->di_core.di_nextents) +
359 be16_to_cpu(dip->di_core.di_anextents)),
361 be64_to_cpu(dip->di_core.di_nblocks));
362 XFS_CORRUPTION_ERROR("xfs_iformat(1)", XFS_ERRLEVEL_LOW,
364 return XFS_ERROR(EFSCORRUPTED);
367 if (unlikely(dip->di_core.di_forkoff > ip->i_mount->m_sb.sb_inodesize)) {
368 xfs_fs_repair_cmn_err(CE_WARN, ip->i_mount,
369 "corrupt dinode %Lu, forkoff = 0x%x.",
370 (unsigned long long)ip->i_ino,
371 dip->di_core.di_forkoff);
372 XFS_CORRUPTION_ERROR("xfs_iformat(2)", XFS_ERRLEVEL_LOW,
374 return XFS_ERROR(EFSCORRUPTED);
377 switch (ip->i_d.di_mode & S_IFMT) {
382 if (unlikely(dip->di_core.di_format != XFS_DINODE_FMT_DEV)) {
383 XFS_CORRUPTION_ERROR("xfs_iformat(3)", XFS_ERRLEVEL_LOW,
385 return XFS_ERROR(EFSCORRUPTED);
389 ip->i_df.if_u2.if_rdev = be32_to_cpu(dip->di_u.di_dev);
395 switch (dip->di_core.di_format) {
396 case XFS_DINODE_FMT_LOCAL:
398 * no local regular files yet
400 if (unlikely((be16_to_cpu(dip->di_core.di_mode) & S_IFMT) == S_IFREG)) {
401 xfs_fs_repair_cmn_err(CE_WARN, ip->i_mount,
403 "(local format for regular file).",
404 (unsigned long long) ip->i_ino);
405 XFS_CORRUPTION_ERROR("xfs_iformat(4)",
408 return XFS_ERROR(EFSCORRUPTED);
411 di_size = be64_to_cpu(dip->di_core.di_size);
412 if (unlikely(di_size > XFS_DFORK_DSIZE(dip, ip->i_mount))) {
413 xfs_fs_repair_cmn_err(CE_WARN, ip->i_mount,
415 "(bad size %Ld for local inode).",
416 (unsigned long long) ip->i_ino,
417 (long long) di_size);
418 XFS_CORRUPTION_ERROR("xfs_iformat(5)",
421 return XFS_ERROR(EFSCORRUPTED);
425 error = xfs_iformat_local(ip, dip, XFS_DATA_FORK, size);
427 case XFS_DINODE_FMT_EXTENTS:
428 error = xfs_iformat_extents(ip, dip, XFS_DATA_FORK);
430 case XFS_DINODE_FMT_BTREE:
431 error = xfs_iformat_btree(ip, dip, XFS_DATA_FORK);
434 XFS_ERROR_REPORT("xfs_iformat(6)", XFS_ERRLEVEL_LOW,
436 return XFS_ERROR(EFSCORRUPTED);
441 XFS_ERROR_REPORT("xfs_iformat(7)", XFS_ERRLEVEL_LOW, ip->i_mount);
442 return XFS_ERROR(EFSCORRUPTED);
447 if (!XFS_DFORK_Q(dip))
449 ASSERT(ip->i_afp == NULL);
450 ip->i_afp = kmem_zone_zalloc(xfs_ifork_zone, KM_SLEEP);
451 ip->i_afp->if_ext_max =
452 XFS_IFORK_ASIZE(ip) / (uint)sizeof(xfs_bmbt_rec_t);
453 switch (dip->di_core.di_aformat) {
454 case XFS_DINODE_FMT_LOCAL:
455 atp = (xfs_attr_shortform_t *)XFS_DFORK_APTR(dip);
456 size = be16_to_cpu(atp->hdr.totsize);
457 error = xfs_iformat_local(ip, dip, XFS_ATTR_FORK, size);
459 case XFS_DINODE_FMT_EXTENTS:
460 error = xfs_iformat_extents(ip, dip, XFS_ATTR_FORK);
462 case XFS_DINODE_FMT_BTREE:
463 error = xfs_iformat_btree(ip, dip, XFS_ATTR_FORK);
466 error = XFS_ERROR(EFSCORRUPTED);
470 kmem_zone_free(xfs_ifork_zone, ip->i_afp);
472 xfs_idestroy_fork(ip, XFS_DATA_FORK);
478 * The file is in-lined in the on-disk inode.
479 * If it fits into if_inline_data, then copy
480 * it there, otherwise allocate a buffer for it
481 * and copy the data there. Either way, set
482 * if_data to point at the data.
483 * If we allocate a buffer for the data, make
484 * sure that its size is a multiple of 4 and
485 * record the real size in if_real_bytes.
498 * If the size is unreasonable, then something
499 * is wrong and we just bail out rather than crash in
500 * kmem_alloc() or memcpy() below.
502 if (unlikely(size > XFS_DFORK_SIZE(dip, ip->i_mount, whichfork))) {
503 xfs_fs_repair_cmn_err(CE_WARN, ip->i_mount,
505 "(bad size %d for local fork, size = %d).",
506 (unsigned long long) ip->i_ino, size,
507 XFS_DFORK_SIZE(dip, ip->i_mount, whichfork));
508 XFS_CORRUPTION_ERROR("xfs_iformat_local", XFS_ERRLEVEL_LOW,
510 return XFS_ERROR(EFSCORRUPTED);
512 ifp = XFS_IFORK_PTR(ip, whichfork);
515 ifp->if_u1.if_data = NULL;
516 else if (size <= sizeof(ifp->if_u2.if_inline_data))
517 ifp->if_u1.if_data = ifp->if_u2.if_inline_data;
519 real_size = roundup(size, 4);
520 ifp->if_u1.if_data = kmem_alloc(real_size, KM_SLEEP);
522 ifp->if_bytes = size;
523 ifp->if_real_bytes = real_size;
525 memcpy(ifp->if_u1.if_data, XFS_DFORK_PTR(dip, whichfork), size);
526 ifp->if_flags &= ~XFS_IFEXTENTS;
527 ifp->if_flags |= XFS_IFINLINE;
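/*
 * For example (illustration only): a 50-byte shortform fork that is too large
 * for if_inline_data (assumed here to be smaller than 50 bytes) gets a
 * separate buffer of real_size = roundup(50, 4) = 52 bytes, so if_bytes ends
 * up 50 while if_real_bytes records the 52 bytes actually allocated; a fork
 * that does fit is simply copied into if_inline_data with no allocation.
 */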
532 * The file consists of a set of extents all
533 * of which fit into the on-disk inode.
534 * If there are few enough extents to fit into
535 * the if_inline_ext, then copy them there.
536 * Otherwise allocate a buffer for them and copy
537 * them into it. Either way, set if_extents
538 * to point at the extents.
552 ifp = XFS_IFORK_PTR(ip, whichfork);
553 nex = XFS_DFORK_NEXTENTS(dip, whichfork);
554 size = nex * (uint)sizeof(xfs_bmbt_rec_t);
557 * If the number of extents is unreasonable, then something
558 * is wrong and we just bail out rather than crash in
559 * kmem_alloc() or memcpy() below.
561 if (unlikely(size < 0 || size > XFS_DFORK_SIZE(dip, ip->i_mount, whichfork))) {
562 xfs_fs_repair_cmn_err(CE_WARN, ip->i_mount,
563 "corrupt inode %Lu ((a)extents = %d).",
564 (unsigned long long) ip->i_ino, nex);
565 XFS_CORRUPTION_ERROR("xfs_iformat_extents(1)", XFS_ERRLEVEL_LOW,
567 return XFS_ERROR(EFSCORRUPTED);
570 ifp->if_real_bytes = 0;
572 ifp->if_u1.if_extents = NULL;
573 else if (nex <= XFS_INLINE_EXTS)
574 ifp->if_u1.if_extents = ifp->if_u2.if_inline_ext;
576 xfs_iext_add(ifp, 0, nex);
578 ifp->if_bytes = size;
580 dp = (xfs_bmbt_rec_t *) XFS_DFORK_PTR(dip, whichfork);
581 xfs_validate_extents(ifp, nex, XFS_EXTFMT_INODE(ip));
582 for (i = 0; i < nex; i++, dp++) {
583 xfs_bmbt_rec_host_t *ep = xfs_iext_get_ext(ifp, i);
584 ep->l0 = be64_to_cpu(get_unaligned(&dp->l0));
585 ep->l1 = be64_to_cpu(get_unaligned(&dp->l1));
587 XFS_BMAP_TRACE_EXLIST(ip, nex, whichfork);
588 if (whichfork != XFS_DATA_FORK ||
589 XFS_EXTFMT_INODE(ip) == XFS_EXTFMT_NOSTATE)
590 if (unlikely(xfs_check_nostate_extents(
592 XFS_ERROR_REPORT("xfs_iformat_extents(2)",
595 return XFS_ERROR(EFSCORRUPTED);
598 ifp->if_flags |= XFS_IFEXTENTS;
603 * The file has too many extents to fit into
604 * the inode, so they are in B-tree format.
605 * Allocate a buffer for the root of the B-tree
606 * and copy the root into it. The if_extents
607 * field will remain NULL until all of the
608 * extents are read in (when they are needed).
616 xfs_bmdr_block_t *dfp;
622 ifp = XFS_IFORK_PTR(ip, whichfork);
623 dfp = (xfs_bmdr_block_t *)XFS_DFORK_PTR(dip, whichfork);
624 size = XFS_BMAP_BROOT_SPACE(dfp);
625 nrecs = XFS_BMAP_BROOT_NUMRECS(dfp);
628 * blow out if -- fork has fewer extents than can fit in the
629 * fork (the fork shouldn't be in btree format), the root btree
630 * block has more records than can fit into the fork,
631 * or the number of extents is greater than the number of blocks.
634 if (unlikely(XFS_IFORK_NEXTENTS(ip, whichfork) <= ifp->if_ext_max
635 || XFS_BMDR_SPACE_CALC(nrecs) >
636 XFS_DFORK_SIZE(dip, ip->i_mount, whichfork)
637 || XFS_IFORK_NEXTENTS(ip, whichfork) > ip->i_d.di_nblocks)) {
638 xfs_fs_repair_cmn_err(CE_WARN, ip->i_mount,
639 "corrupt inode %Lu (btree).",
640 (unsigned long long) ip->i_ino);
641 XFS_ERROR_REPORT("xfs_iformat_btree", XFS_ERRLEVEL_LOW,
643 return XFS_ERROR(EFSCORRUPTED);
646 ifp->if_broot_bytes = size;
647 ifp->if_broot = kmem_alloc(size, KM_SLEEP);
648 ASSERT(ifp->if_broot != NULL);
650 * Copy and convert from the on-disk structure
651 * to the in-memory structure.
653 xfs_bmdr_to_bmbt(dfp, XFS_DFORK_SIZE(dip, ip->i_mount, whichfork),
654 ifp->if_broot, size);
655 ifp->if_flags &= ~XFS_IFEXTENTS;
656 ifp->if_flags |= XFS_IFBROOT;
662 xfs_dinode_from_disk(
664 xfs_dinode_core_t *from)
666 to->di_magic = be16_to_cpu(from->di_magic);
667 to->di_mode = be16_to_cpu(from->di_mode);
668 to->di_version = from->di_version;
669 to->di_format = from->di_format;
670 to->di_onlink = be16_to_cpu(from->di_onlink);
671 to->di_uid = be32_to_cpu(from->di_uid);
672 to->di_gid = be32_to_cpu(from->di_gid);
673 to->di_nlink = be32_to_cpu(from->di_nlink);
674 to->di_projid = be16_to_cpu(from->di_projid);
675 memcpy(to->di_pad, from->di_pad, sizeof(to->di_pad));
676 to->di_flushiter = be16_to_cpu(from->di_flushiter);
677 to->di_atime.t_sec = be32_to_cpu(from->di_atime.t_sec);
678 to->di_atime.t_nsec = be32_to_cpu(from->di_atime.t_nsec);
679 to->di_mtime.t_sec = be32_to_cpu(from->di_mtime.t_sec);
680 to->di_mtime.t_nsec = be32_to_cpu(from->di_mtime.t_nsec);
681 to->di_ctime.t_sec = be32_to_cpu(from->di_ctime.t_sec);
682 to->di_ctime.t_nsec = be32_to_cpu(from->di_ctime.t_nsec);
683 to->di_size = be64_to_cpu(from->di_size);
684 to->di_nblocks = be64_to_cpu(from->di_nblocks);
685 to->di_extsize = be32_to_cpu(from->di_extsize);
686 to->di_nextents = be32_to_cpu(from->di_nextents);
687 to->di_anextents = be16_to_cpu(from->di_anextents);
688 to->di_forkoff = from->di_forkoff;
689 to->di_aformat = from->di_aformat;
690 to->di_dmevmask = be32_to_cpu(from->di_dmevmask);
691 to->di_dmstate = be16_to_cpu(from->di_dmstate);
692 to->di_flags = be16_to_cpu(from->di_flags);
693 to->di_gen = be32_to_cpu(from->di_gen);
698 xfs_dinode_core_t *to,
699 xfs_icdinode_t *from)
701 to->di_magic = cpu_to_be16(from->di_magic);
702 to->di_mode = cpu_to_be16(from->di_mode);
703 to->di_version = from->di_version;
704 to->di_format = from->di_format;
705 to->di_onlink = cpu_to_be16(from->di_onlink);
706 to->di_uid = cpu_to_be32(from->di_uid);
707 to->di_gid = cpu_to_be32(from->di_gid);
708 to->di_nlink = cpu_to_be32(from->di_nlink);
709 to->di_projid = cpu_to_be16(from->di_projid);
710 memcpy(to->di_pad, from->di_pad, sizeof(to->di_pad));
711 to->di_flushiter = cpu_to_be16(from->di_flushiter);
712 to->di_atime.t_sec = cpu_to_be32(from->di_atime.t_sec);
713 to->di_atime.t_nsec = cpu_to_be32(from->di_atime.t_nsec);
714 to->di_mtime.t_sec = cpu_to_be32(from->di_mtime.t_sec);
715 to->di_mtime.t_nsec = cpu_to_be32(from->di_mtime.t_nsec);
716 to->di_ctime.t_sec = cpu_to_be32(from->di_ctime.t_sec);
717 to->di_ctime.t_nsec = cpu_to_be32(from->di_ctime.t_nsec);
718 to->di_size = cpu_to_be64(from->di_size);
719 to->di_nblocks = cpu_to_be64(from->di_nblocks);
720 to->di_extsize = cpu_to_be32(from->di_extsize);
721 to->di_nextents = cpu_to_be32(from->di_nextents);
722 to->di_anextents = cpu_to_be16(from->di_anextents);
723 to->di_forkoff = from->di_forkoff;
724 to->di_aformat = from->di_aformat;
725 to->di_dmevmask = cpu_to_be32(from->di_dmevmask);
726 to->di_dmstate = cpu_to_be16(from->di_dmstate);
727 to->di_flags = cpu_to_be16(from->di_flags);
728 to->di_gen = cpu_to_be32(from->di_gen);
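/*
 * Usage sketch (illustration only): the two helpers above are inverses over
 * the endian-converted fields, so round-tripping an on-disk core should
 * reproduce it:
 *
 *	xfs_icdinode_t		icd;
 *	xfs_dinode_core_t	dic;
 *
 *	xfs_dinode_from_disk(&icd, &dip->di_core);
 *	xfs_dinode_to_disk(&dic, &icd);
 *	ASSERT(dic.di_magic == dip->di_core.di_magic);
 */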
737 if (di_flags & XFS_DIFLAG_ANY) {
738 if (di_flags & XFS_DIFLAG_REALTIME)
739 flags |= XFS_XFLAG_REALTIME;
740 if (di_flags & XFS_DIFLAG_PREALLOC)
741 flags |= XFS_XFLAG_PREALLOC;
742 if (di_flags & XFS_DIFLAG_IMMUTABLE)
743 flags |= XFS_XFLAG_IMMUTABLE;
744 if (di_flags & XFS_DIFLAG_APPEND)
745 flags |= XFS_XFLAG_APPEND;
746 if (di_flags & XFS_DIFLAG_SYNC)
747 flags |= XFS_XFLAG_SYNC;
748 if (di_flags & XFS_DIFLAG_NOATIME)
749 flags |= XFS_XFLAG_NOATIME;
750 if (di_flags & XFS_DIFLAG_NODUMP)
751 flags |= XFS_XFLAG_NODUMP;
752 if (di_flags & XFS_DIFLAG_RTINHERIT)
753 flags |= XFS_XFLAG_RTINHERIT;
754 if (di_flags & XFS_DIFLAG_PROJINHERIT)
755 flags |= XFS_XFLAG_PROJINHERIT;
756 if (di_flags & XFS_DIFLAG_NOSYMLINKS)
757 flags |= XFS_XFLAG_NOSYMLINKS;
758 if (di_flags & XFS_DIFLAG_EXTSIZE)
759 flags |= XFS_XFLAG_EXTSIZE;
760 if (di_flags & XFS_DIFLAG_EXTSZINHERIT)
761 flags |= XFS_XFLAG_EXTSZINHERIT;
762 if (di_flags & XFS_DIFLAG_NODEFRAG)
763 flags |= XFS_XFLAG_NODEFRAG;
764 if (di_flags & XFS_DIFLAG_FILESTREAM)
765 flags |= XFS_XFLAG_FILESTREAM;
775 xfs_icdinode_t *dic = &ip->i_d;
777 return _xfs_dic2xflags(dic->di_flags) |
778 (XFS_IFORK_Q(ip) ? XFS_XFLAG_HASATTR : 0);
785 xfs_dinode_core_t *dic = &dip->di_core;
787 return _xfs_dic2xflags(be16_to_cpu(dic->di_flags)) |
788 (XFS_DFORK_Q(dip) ? XFS_XFLAG_HASATTR : 0);
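/*
 * Example (illustration only): an on-disk inode with XFS_DIFLAG_REALTIME and
 * XFS_DIFLAG_APPEND set, plus an attribute fork, is reported as
 * XFS_XFLAG_REALTIME | XFS_XFLAG_APPEND | XFS_XFLAG_HASATTR by
 * xfs_ip2xflags()/xfs_dic2xflags() above.
 */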
792 * Given a mount structure and an inode number, return a pointer
793 * to a newly allocated in-core inode corresponding to the given inode number.
796 * Initialize the inode's attributes and extent pointers if it
797 * already has them (it will not if the inode has no links).
813 ASSERT(xfs_inode_zone != NULL);
815 ip = kmem_zone_zalloc(xfs_inode_zone, KM_SLEEP);
818 atomic_set(&ip->i_iocount, 0);
819 spin_lock_init(&ip->i_flags_lock);
822 * Get pointers to the on-disk inode and the buffer containing it.
823 * If the inode number refers to a block outside the file system
824 * then xfs_itobp() will return NULL. In this case we should
825 * return NULL as well. Set i_blkno to 0 so that xfs_itobp() will
826 * know that this is a new incore inode.
828 error = xfs_itobp(mp, tp, ip, &dip, &bp, bno, imap_flags, XFS_BUF_LOCK);
830 kmem_zone_free(xfs_inode_zone, ip);
835 * Initialize inode's trace buffers.
836 * Do this before xfs_iformat in case it adds entries.
838 #ifdef XFS_INODE_TRACE
839 ip->i_trace = ktrace_alloc(INODE_TRACE_SIZE, KM_SLEEP);
841 #ifdef XFS_BMAP_TRACE
842 ip->i_xtrace = ktrace_alloc(XFS_BMAP_KTRACE_SIZE, KM_SLEEP);
844 #ifdef XFS_BMBT_TRACE
845 ip->i_btrace = ktrace_alloc(XFS_BMBT_KTRACE_SIZE, KM_SLEEP);
848 ip->i_rwtrace = ktrace_alloc(XFS_RW_KTRACE_SIZE, KM_SLEEP);
850 #ifdef XFS_ILOCK_TRACE
851 ip->i_lock_trace = ktrace_alloc(XFS_ILOCK_KTRACE_SIZE, KM_SLEEP);
853 #ifdef XFS_DIR2_TRACE
854 ip->i_dir_trace = ktrace_alloc(XFS_DIR2_KTRACE_SIZE, KM_SLEEP);
858 * If we got something that isn't an inode it means someone
859 * (nfs or dmi) has a stale handle.
861 if (be16_to_cpu(dip->di_core.di_magic) != XFS_DINODE_MAGIC) {
862 kmem_zone_free(xfs_inode_zone, ip);
863 xfs_trans_brelse(tp, bp);
865 xfs_fs_cmn_err(CE_ALERT, mp, "xfs_iread: "
866 "dip->di_core.di_magic (0x%x) != "
867 "XFS_DINODE_MAGIC (0x%x)",
868 be16_to_cpu(dip->di_core.di_magic),
871 return XFS_ERROR(EINVAL);
875 * If the on-disk inode is already linked to a directory
876 * entry, copy all of the inode into the in-core inode.
877 * xfs_iformat() handles copying in the inode format
878 * specific information.
879 * Otherwise, just get the truly permanent information.
881 if (dip->di_core.di_mode) {
882 xfs_dinode_from_disk(&ip->i_d, &dip->di_core);
883 error = xfs_iformat(ip, dip);
885 kmem_zone_free(xfs_inode_zone, ip);
886 xfs_trans_brelse(tp, bp);
888 xfs_fs_cmn_err(CE_ALERT, mp, "xfs_iread: "
889 "xfs_iformat() returned error %d",
895 ip->i_d.di_magic = be16_to_cpu(dip->di_core.di_magic);
896 ip->i_d.di_version = dip->di_core.di_version;
897 ip->i_d.di_gen = be32_to_cpu(dip->di_core.di_gen);
898 ip->i_d.di_flushiter = be16_to_cpu(dip->di_core.di_flushiter);
900 * Make sure to pull in the mode here as well in
901 * case the inode is released without being used.
902 * This ensures that xfs_inactive() will see that
903 * the inode is already free and not try to mess
904 * with the uninitialized part of it.
908 * Initialize the per-fork minima and maxima for a new
909 * inode here. xfs_iformat will do it for old inodes.
911 ip->i_df.if_ext_max =
912 XFS_IFORK_DSIZE(ip) / (uint)sizeof(xfs_bmbt_rec_t);
915 INIT_LIST_HEAD(&ip->i_reclaim);
918 * The inode format changed when we moved the link count and
919 * made it 32 bits long. If this is an old format inode,
920 * convert it in memory to look like a new one. If it gets
921 * flushed to disk we will convert back before flushing or
922 * logging it. We zero out the new projid field and the old link
923 * count field. We'll handle clearing the pad field (the remains
924 * of the old uuid field) when we actually convert the inode to
925 * the new format. We don't change the version number so that we
926 * can distinguish this from a real new format inode.
928 if (ip->i_d.di_version == XFS_DINODE_VERSION_1) {
929 ip->i_d.di_nlink = ip->i_d.di_onlink;
930 ip->i_d.di_onlink = 0;
931 ip->i_d.di_projid = 0;
934 ip->i_delayed_blks = 0;
935 ip->i_size = ip->i_d.di_size;
938 * Mark the buffer containing the inode as something to keep
939 * around for a while. This helps to keep recently accessed
940 * meta-data in-core longer.
942 XFS_BUF_SET_REF(bp, XFS_INO_REF);
945 * Use xfs_trans_brelse() to release the buffer containing the
946 * on-disk inode, because it was acquired with xfs_trans_read_buf()
947 * in xfs_itobp() above. If tp is NULL, this is just a normal
948 * brelse(). If we're within a transaction, then xfs_trans_brelse()
949 * will only release the buffer if it is not dirty within the
950 * transaction. It will be OK to release the buffer in this case,
951 * because inodes on disk are never destroyed and we will be
952 * locking the new in-core inode before putting it in the hash
953 * table where other processes can find it. Thus we don't have
954 * to worry about the inode being changed just because we released the buffer.
957 xfs_trans_brelse(tp, bp);
963 * Read in extents from a btree-format inode.
964 * Allocate and fill in if_extents. Real work is done in xfs_bmap.c.
974 xfs_extnum_t nextents;
977 if (unlikely(XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE)) {
978 XFS_ERROR_REPORT("xfs_iread_extents", XFS_ERRLEVEL_LOW,
980 return XFS_ERROR(EFSCORRUPTED);
982 nextents = XFS_IFORK_NEXTENTS(ip, whichfork);
983 size = nextents * sizeof(xfs_bmbt_rec_t);
984 ifp = XFS_IFORK_PTR(ip, whichfork);
987 * We know that the size is valid (it's checked in iformat_btree)
989 ifp->if_lastex = NULLEXTNUM;
990 ifp->if_bytes = ifp->if_real_bytes = 0;
991 ifp->if_flags |= XFS_IFEXTENTS;
992 xfs_iext_add(ifp, 0, nextents);
993 error = xfs_bmap_read_extents(tp, ip, whichfork);
995 xfs_iext_destroy(ifp);
996 ifp->if_flags &= ~XFS_IFEXTENTS;
999 xfs_validate_extents(ifp, nextents, XFS_EXTFMT_INODE(ip));
1004 * Allocate an inode on disk and return a copy of its in-core version.
1005 * The in-core inode is locked exclusively. Set mode, nlink, and rdev
1006 * appropriately within the inode. The uid and gid for the inode are
1007 * set according to the contents of the given cred structure.
1009 * Use xfs_dialloc() to allocate the on-disk inode. If xfs_dialloc()
1010 * has a free inode available, call xfs_iget()
1011 * to obtain the in-core version of the allocated inode. Finally,
1012 * fill in the inode and log its initial contents. In this case,
1013 * ialloc_context would be set to NULL and call_again set to false.
1015 * If xfs_dialloc() does not have an available inode,
1016 * it will replenish its supply by doing an allocation. Since we can
1017 * only do one allocation within a transaction without deadlocks, we
1018 * must commit the current transaction before returning the inode itself.
1019 * In this case, therefore, we will set call_again to true and return.
1020 * The caller should then commit the current transaction, start a new
1021 * transaction, and call xfs_ialloc() again to actually get the inode.
1023 * To ensure that some other process does not grab the inode that
1024 * was allocated during the first call to xfs_ialloc(), this routine
1025 * also returns the [locked] bp pointing to the head of the freelist
1026 * as ialloc_context. The caller should hold this buffer across
1027 * the commit and pass it back into this routine on the second call.
1029 * If we are allocating quota inodes, we do not have a parent inode
1030 * to attach to or associate with (i.e. pip == NULL) because they
1031 * are not linked into the directory structure - they are attached
1032 * directly to the superblock - and so have no parent.
1044 xfs_buf_t **ialloc_context,
1045 boolean_t *call_again,
1055 * Call the space management code to pick
1056 * the on-disk inode to be allocated.
1058 error = xfs_dialloc(tp, pip ? pip->i_ino : 0, mode, okalloc,
1059 ialloc_context, call_again, &ino);
1063 if (*call_again || ino == NULLFSINO) {
1067 ASSERT(*ialloc_context == NULL);
1070 * Get the in-core inode with the lock held exclusively.
1071 * This is because we're setting fields here that we need
1072 * to prevent others from looking at until we're done.
1074 error = xfs_trans_iget(tp->t_mountp, tp, ino,
1075 XFS_IGET_CREATE, XFS_ILOCK_EXCL, &ip);
1082 ip->i_d.di_mode = (__uint16_t)mode;
1083 ip->i_d.di_onlink = 0;
1084 ip->i_d.di_nlink = nlink;
1085 ASSERT(ip->i_d.di_nlink == nlink);
1086 ip->i_d.di_uid = current_fsuid(cr);
1087 ip->i_d.di_gid = current_fsgid(cr);
1088 ip->i_d.di_projid = prid;
1089 memset(&(ip->i_d.di_pad[0]), 0, sizeof(ip->i_d.di_pad));
1092 * If the superblock version is up to where we support new format
1093 * inodes and this is currently an old format inode, then change
1094 * the inode version number now. This way we only do the conversion
1095 * here rather than here and in the flush/logging code.
1097 if (xfs_sb_version_hasnlink(&tp->t_mountp->m_sb) &&
1098 ip->i_d.di_version == XFS_DINODE_VERSION_1) {
1099 ip->i_d.di_version = XFS_DINODE_VERSION_2;
1101 * We've already zeroed the old link count, the projid field,
1102 * and the pad field.
1107 * Project ids won't be stored on disk if we are using a version 1 inode.
1109 if ((prid != 0) && (ip->i_d.di_version == XFS_DINODE_VERSION_1))
1110 xfs_bump_ino_vers2(tp, ip);
1112 if (pip && XFS_INHERIT_GID(pip)) {
1113 ip->i_d.di_gid = pip->i_d.di_gid;
1114 if ((pip->i_d.di_mode & S_ISGID) && (mode & S_IFMT) == S_IFDIR) {
1115 ip->i_d.di_mode |= S_ISGID;
1120 * If the group ID of the new file does not match the effective group
1121 * ID or one of the supplementary group IDs, the S_ISGID bit is cleared
1122 * (and only if the irix_sgid_inherit compatibility variable is set).
1124 if ((irix_sgid_inherit) &&
1125 (ip->i_d.di_mode & S_ISGID) &&
1126 (!in_group_p((gid_t)ip->i_d.di_gid))) {
1127 ip->i_d.di_mode &= ~S_ISGID;
1130 ip->i_d.di_size = 0;
1132 ip->i_d.di_nextents = 0;
1133 ASSERT(ip->i_d.di_nblocks == 0);
1134 xfs_ichgtime(ip, XFS_ICHGTIME_CHG|XFS_ICHGTIME_ACC|XFS_ICHGTIME_MOD);
1136 * di_gen will have been taken care of in xfs_iread.
1138 ip->i_d.di_extsize = 0;
1139 ip->i_d.di_dmevmask = 0;
1140 ip->i_d.di_dmstate = 0;
1141 ip->i_d.di_flags = 0;
1142 flags = XFS_ILOG_CORE;
1143 switch (mode & S_IFMT) {
1148 ip->i_d.di_format = XFS_DINODE_FMT_DEV;
1149 ip->i_df.if_u2.if_rdev = rdev;
1150 ip->i_df.if_flags = 0;
1151 flags |= XFS_ILOG_DEV;
1154 if (pip && xfs_inode_is_filestream(pip)) {
1155 error = xfs_filestream_associate(pip, ip);
1159 xfs_iflags_set(ip, XFS_IFILESTREAM);
1163 if (pip && (pip->i_d.di_flags & XFS_DIFLAG_ANY)) {
1166 if ((mode & S_IFMT) == S_IFDIR) {
1167 if (pip->i_d.di_flags & XFS_DIFLAG_RTINHERIT)
1168 di_flags |= XFS_DIFLAG_RTINHERIT;
1169 if (pip->i_d.di_flags & XFS_DIFLAG_EXTSZINHERIT) {
1170 di_flags |= XFS_DIFLAG_EXTSZINHERIT;
1171 ip->i_d.di_extsize = pip->i_d.di_extsize;
1173 } else if ((mode & S_IFMT) == S_IFREG) {
1174 if (pip->i_d.di_flags & XFS_DIFLAG_RTINHERIT)
1175 di_flags |= XFS_DIFLAG_REALTIME;
1176 if (pip->i_d.di_flags & XFS_DIFLAG_EXTSZINHERIT) {
1177 di_flags |= XFS_DIFLAG_EXTSIZE;
1178 ip->i_d.di_extsize = pip->i_d.di_extsize;
1181 if ((pip->i_d.di_flags & XFS_DIFLAG_NOATIME) &&
1182 xfs_inherit_noatime)
1183 di_flags |= XFS_DIFLAG_NOATIME;
1184 if ((pip->i_d.di_flags & XFS_DIFLAG_NODUMP) &&
1186 di_flags |= XFS_DIFLAG_NODUMP;
1187 if ((pip->i_d.di_flags & XFS_DIFLAG_SYNC) &&
1189 di_flags |= XFS_DIFLAG_SYNC;
1190 if ((pip->i_d.di_flags & XFS_DIFLAG_NOSYMLINKS) &&
1191 xfs_inherit_nosymlinks)
1192 di_flags |= XFS_DIFLAG_NOSYMLINKS;
1193 if (pip->i_d.di_flags & XFS_DIFLAG_PROJINHERIT)
1194 di_flags |= XFS_DIFLAG_PROJINHERIT;
1195 if ((pip->i_d.di_flags & XFS_DIFLAG_NODEFRAG) &&
1196 xfs_inherit_nodefrag)
1197 di_flags |= XFS_DIFLAG_NODEFRAG;
1198 if (pip->i_d.di_flags & XFS_DIFLAG_FILESTREAM)
1199 di_flags |= XFS_DIFLAG_FILESTREAM;
1200 ip->i_d.di_flags |= di_flags;
1204 ip->i_d.di_format = XFS_DINODE_FMT_EXTENTS;
1205 ip->i_df.if_flags = XFS_IFEXTENTS;
1206 ip->i_df.if_bytes = ip->i_df.if_real_bytes = 0;
1207 ip->i_df.if_u1.if_extents = NULL;
1213 * Attribute fork settings for new inode.
1215 ip->i_d.di_aformat = XFS_DINODE_FMT_EXTENTS;
1216 ip->i_d.di_anextents = 0;
1219 * Log the new values stuffed into the inode.
1221 xfs_trans_log_inode(tp, ip, flags);
1223 /* now that we have an i_mode we can setup inode ops and unlock */
1224 xfs_initialize_vnode(tp->t_mountp, vp, ip);
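/*
 * Caller-side sketch (illustration only; names follow the comment above
 * xfs_ialloc()): when *call_again comes back true, the caller commits the
 * current transaction while the returned ialloc_context buffer stays held,
 * reserves a new transaction, and calls back in, roughly:
 *
 *	error = xfs_ialloc(tp, pip, mode, 1, rdev, cr, prid, okalloc,
 *			   &ialloc_context, &call_again, &ip);
 *	if (call_again) {
 *		... commit tp, keeping the ialloc_context buffer ...
 *		... start and reserve a new tp ...
 *		error = xfs_ialloc(tp, pip, mode, 1, rdev, cr, prid, okalloc,
 *				   &ialloc_context, &call_again, &ip);
 *	}
 */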
1231 * Check to make sure that there are no blocks allocated to the
1232 * file beyond the size of the file. We don't check this for
1233 * files with fixed size extents or real time extents, but we
1234 * at least do it for regular files.
1243 xfs_fileoff_t map_first;
1245 xfs_bmbt_irec_t imaps[2];
1247 if ((ip->i_d.di_mode & S_IFMT) != S_IFREG)
1250 if (XFS_IS_REALTIME_INODE(ip))
1253 if (ip->i_d.di_flags & XFS_DIFLAG_EXTSIZE)
1257 map_first = XFS_B_TO_FSB(mp, (xfs_ufsize_t)isize);
1259 * The filesystem could be shutting down, so bmapi may return an error.
1262 if (xfs_bmapi(NULL, ip, map_first,
1264 (xfs_ufsize_t)XFS_MAXIOFFSET(mp)) -
1266 XFS_BMAPI_ENTIRE, NULL, 0, imaps, &nimaps,
1269 ASSERT(nimaps == 1);
1270 ASSERT(imaps[0].br_startblock == HOLESTARTBLOCK);
1275 * Calculate the last possible buffered byte in a file. This must
1276 * include data that was buffered beyond the EOF by the write code.
1277 * This also needs to deal with overflowing the xfs_fsize_t type
1278 * which can happen for sizes near the limit.
1280 * We also need to take into account any blocks beyond the EOF. It
1281 * may be the case that they were buffered by a write which failed.
1282 * In that case the pages will still be in memory, but the inode size
1283 * will never have been updated.
1290 xfs_fsize_t last_byte;
1291 xfs_fileoff_t last_block;
1292 xfs_fileoff_t size_last_block;
1295 ASSERT(ismrlocked(&(ip->i_iolock), MR_UPDATE | MR_ACCESS));
1299 * Only check for blocks beyond the EOF if the extents have
1300 * been read in. This eliminates the need for the inode lock,
1301 * and it also saves us from looking when it really isn't needed.
1304 if (ip->i_df.if_flags & XFS_IFEXTENTS) {
1305 error = xfs_bmap_last_offset(NULL, ip, &last_block,
1313 size_last_block = XFS_B_TO_FSB(mp, (xfs_ufsize_t)ip->i_size);
1314 last_block = XFS_FILEOFF_MAX(last_block, size_last_block);
1316 last_byte = XFS_FSB_TO_B(mp, last_block);
1317 if (last_byte < 0) {
1318 return XFS_MAXIOFFSET(mp);
1320 last_byte += (1 << mp->m_writeio_log);
1321 if (last_byte < 0) {
1322 return XFS_MAXIOFFSET(mp);
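/*
 * Note (illustration only): both "last_byte < 0" checks above guard against
 * xfs_fsize_t overflow near the maximum supported offset; converting
 * last_block back to bytes, or padding by one write-I/O buffer
 * (1 << m_writeio_log), can wrap negative, in which case the routine falls
 * back to XFS_MAXIOFFSET(mp).
 */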
1327 #if defined(XFS_RW_TRACE)
1333 xfs_fsize_t new_size,
1334 xfs_off_t toss_start,
1335 xfs_off_t toss_finish)
1337 if (ip->i_rwtrace == NULL) {
1341 ktrace_enter(ip->i_rwtrace,
1344 (void*)(unsigned long)((ip->i_d.di_size >> 32) & 0xffffffff),
1345 (void*)(unsigned long)(ip->i_d.di_size & 0xffffffff),
1346 (void*)((long)flag),
1347 (void*)(unsigned long)((new_size >> 32) & 0xffffffff),
1348 (void*)(unsigned long)(new_size & 0xffffffff),
1349 (void*)(unsigned long)((toss_start >> 32) & 0xffffffff),
1350 (void*)(unsigned long)(toss_start & 0xffffffff),
1351 (void*)(unsigned long)((toss_finish >> 32) & 0xffffffff),
1352 (void*)(unsigned long)(toss_finish & 0xffffffff),
1353 (void*)(unsigned long)current_cpu(),
1354 (void*)(unsigned long)current_pid(),
1360 #define xfs_itrunc_trace(tag, ip, flag, new_size, toss_start, toss_finish)
1364 * Start the truncation of the file to new_size. The new size
1365 * must be smaller than the current size. This routine will
1366 * clear the buffer and page caches of file data in the removed
1367 * range, and xfs_itruncate_finish() will remove the underlying disk blocks.
1370 * The inode must have its I/O lock locked EXCLUSIVELY, and it
1371 * must NOT have the inode lock held at all. This is because we're
1372 * calling into the buffer/page cache code and we can't hold the
1373 * inode lock when we do so.
1375 * We need to wait for any direct I/Os in flight to complete before we
1376 * proceed with the truncate. This is needed to prevent the extents
1377 * being read or written by the direct I/Os from being removed while the
1378 * I/O is in flight as there is no other method of synchronising
1379 * direct I/O with the truncate operation. Also, because we hold
1380 * the IOLOCK in exclusive mode, we prevent new direct I/Os from being
1381 * started until the truncate completes and drops the lock. Essentially,
1382 * the vn_iowait() call forms an I/O barrier that provides strict ordering
1383 * between direct I/Os and the truncate operation.
1385 * The flags parameter can have either the value XFS_ITRUNC_DEFINITE
1386 * or XFS_ITRUNC_MAYBE. The XFS_ITRUNC_MAYBE value should be used
1387 * in the case that the caller is locking things out of order and
1388 * may not be able to call xfs_itruncate_finish() with the inode lock
1389 * held without dropping the I/O lock. If the caller must drop the
1390 * I/O lock before calling xfs_itruncate_finish(), then xfs_itruncate_start()
1391 * must be called again with all the same restrictions as the initial call.
1395 xfs_itruncate_start(
1398 xfs_fsize_t new_size)
1400 xfs_fsize_t last_byte;
1401 xfs_off_t toss_start;
1406 ASSERT(ismrlocked(&ip->i_iolock, MR_UPDATE) != 0);
1407 ASSERT((new_size == 0) || (new_size <= ip->i_size));
1408 ASSERT((flags == XFS_ITRUNC_DEFINITE) ||
1409 (flags == XFS_ITRUNC_MAYBE));
1414 /* wait for the completion of any pending DIOs */
1415 if (new_size < ip->i_size)
1419 * Call toss_pages or flushinval_pages to get rid of pages
1420 * overlapping the region being removed. We have to use
1421 * the less efficient flushinval_pages in the case that the
1422 * caller may not be able to finish the truncate without
1423 * dropping the inode's I/O lock. Make sure
1424 * to catch any pages brought in by buffers overlapping
1425 * the EOF by searching out beyond the isize by our
1426 * block size. We round new_size up to a block boundary
1427 * so that we don't toss things on the same block as
1428 * new_size but before it.
1430 * Before calling toss_pages or flushinval_pages, make sure to
1431 * call remapf() over the same region if the file is mapped.
1432 * This frees up mapped file references to the pages in the
1433 * given range and for the flushinval_pages case it ensures
1434 * that we get the latest mapped changes flushed out.
1436 toss_start = XFS_B_TO_FSB(mp, (xfs_ufsize_t)new_size);
1437 toss_start = XFS_FSB_TO_B(mp, toss_start);
1438 if (toss_start < 0) {
1440 * The place to start tossing is beyond our maximum
1441 * file size, so there is no way that the data extended out there.
1446 last_byte = xfs_file_last_byte(ip);
1447 xfs_itrunc_trace(XFS_ITRUNC_START, ip, flags, new_size, toss_start,
1449 if (last_byte > toss_start) {
1450 if (flags & XFS_ITRUNC_DEFINITE) {
1451 xfs_tosspages(ip, toss_start,
1452 -1, FI_REMAPF_LOCKED);
1454 error = xfs_flushinval_pages(ip, toss_start,
1455 -1, FI_REMAPF_LOCKED);
1460 if (new_size == 0) {
1461 ASSERT(VN_CACHED(vp) == 0);
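/*
 * Worked example (illustrative assumption): with 4096-byte filesystem blocks
 * and new_size == 6000, XFS_B_TO_FSB() rounds up to 2 blocks and
 * XFS_FSB_TO_B() converts that back to toss_start == 8192, so the partially
 * retained block containing new_size is never tossed.
 */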
1468 * Shrink the file to the given new_size. The new
1469 * size must be smaller than the current size.
1470 * This will free up the underlying blocks
1471 * in the removed range after a call to xfs_itruncate_start()
1472 * or xfs_atruncate_start().
1474 * The transaction passed to this routine must have made
1475 * a permanent log reservation of at least XFS_ITRUNCATE_LOG_RES.
1476 * This routine may commit the given transaction and
1477 * start new ones, so make sure everything involved in
1478 * the transaction is tidy before calling here.
1479 * Some transaction will be returned to the caller to be
1480 * committed. The incoming transaction must already include
1481 * the inode, and both inode locks must be held exclusively.
1482 * The inode must also be "held" within the transaction. On
1483 * return the inode will be "held" within the returned transaction.
1484 * This routine does NOT require any disk space to be reserved
1485 * for it within the transaction.
1487 * The fork parameter must be either xfs_attr_fork or xfs_data_fork,
1488 * and it indicates the fork which is to be truncated. For the
1489 * attribute fork we only support truncation to size 0.
1491 * We use the sync parameter to indicate whether or not the first
1492 * transaction we perform might have to be synchronous. For the attr fork,
1493 * it needs to be so if the unlink of the inode is not yet known to be
1494 * permanent in the log. This keeps us from freeing and reusing the
1495 * blocks of the attribute fork before the unlink of the inode becomes permanent.
1498 * For the data fork, we normally have to run synchronously if we're
1499 * being called out of the inactive path or we're being called
1500 * out of the create path where we're truncating an existing file.
1501 * Either way, the truncate needs to be sync so blocks don't reappear
1502 * in the file with altered data in case of a crash. wsync filesystems
1503 * can run the first case async because anything that shrinks the inode
1504 * has to run sync so by the time we're called here from inactive, the
1505 * inode size is permanently set to 0.
1507 * Calls from the truncate path always need to be sync unless we're
1508 * in a wsync filesystem and the file has already been unlinked.
1510 * The caller is responsible for correctly setting the sync parameter.
1511 * It gets too hard for us to guess here which path we're being called
1512 * out of just based on inode state.
1515 xfs_itruncate_finish(
1518 xfs_fsize_t new_size,
1522 xfs_fsblock_t first_block;
1523 xfs_fileoff_t first_unmap_block;
1524 xfs_fileoff_t last_block;
1525 xfs_filblks_t unmap_len = 0;
1530 xfs_bmap_free_t free_list;
1533 ASSERT(ismrlocked(&ip->i_iolock, MR_UPDATE) != 0);
1534 ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE) != 0);
1535 ASSERT((new_size == 0) || (new_size <= ip->i_size));
1536 ASSERT(*tp != NULL);
1537 ASSERT((*tp)->t_flags & XFS_TRANS_PERM_LOG_RES);
1538 ASSERT(ip->i_transp == *tp);
1539 ASSERT(ip->i_itemp != NULL);
1540 ASSERT(ip->i_itemp->ili_flags & XFS_ILI_HOLD);
1544 mp = (ntp)->t_mountp;
1545 ASSERT(! XFS_NOT_DQATTACHED(mp, ip));
1548 * We only support truncating the entire attribute fork.
1550 if (fork == XFS_ATTR_FORK) {
1553 first_unmap_block = XFS_B_TO_FSB(mp, (xfs_ufsize_t)new_size);
1554 xfs_itrunc_trace(XFS_ITRUNC_FINISH1, ip, 0, new_size, 0, 0);
1556 * The first thing we do is set the size to new_size permanently
1557 * on disk. This way we don't have to worry about anyone ever
1558 * being able to look at the data being freed even in the face
1559 * of a crash. What we're getting around here is the case where
1560 * we free a block, it is allocated to another file, it is written
1561 * to, and then we crash. If the new data gets written to the
1562 * file but the log buffers containing the free and reallocation
1563 * don't, then we'd end up with garbage in the blocks being freed.
1564 * As long as we make the new_size permanent before actually
1565 * freeing any blocks, it doesn't matter if they get written to.
1567 * The callers must signal into us whether or not the size
1568 * setting here must be synchronous. There are a few cases
1569 * where it doesn't have to be synchronous. Those cases
1570 * occur if the file is unlinked and we know the unlink is
1571 * permanent or if the blocks being truncated are guaranteed
1572 * to be beyond the inode eof (regardless of the link count)
1573 * and the eof value is permanent. Both of these cases occur
1574 * only on wsync-mounted filesystems. In those cases, we're
1575 * guaranteed that no user will ever see the data in the blocks
1576 * that are being truncated so the truncate can run async.
1577 * In the free beyond eof case, the file may wind up with
1578 * more blocks allocated to it than it needs if we crash
1579 * and that won't get fixed until the next time the file
1580 * is re-opened and closed but that's ok as that shouldn't
1581 * be too many blocks.
1583 * However, we can't just make all wsync xactions run async
1584 * because there's one call out of the create path that needs
1585 * to run sync where it's truncating an existing file to size
1586 * 0 whose size is > 0.
1588 * It's probably possible to come up with a test in this
1589 * routine that would correctly distinguish all the above
1590 * cases from the values of the function parameters and the
1591 * inode state but for sanity's sake, I've decided to let the
1592 * layers above just tell us. It's simpler to correctly figure
1593 * out in the layer above exactly under what conditions we
1594 * can run async and I think it's easier for others to read and
1595 * follow the logic in case something has to be changed.
1596 * cscope is your friend -- rcc.
1598 * The attribute fork is much simpler.
1600 * For the attribute fork we allow the caller to tell us whether
1601 * the unlink of the inode that led to this call is yet permanent
1602 * in the on disk log. If it is not and we will be freeing extents
1603 * in this inode then we make the first transaction synchronous
1604 * to make sure that the unlink is permanent by the time we free
1607 if (fork == XFS_DATA_FORK) {
1608 if (ip->i_d.di_nextents > 0) {
1610 * If we are not changing the file size then do
1611 * not update the on-disk file size - we may be
1612 * called from xfs_inactive_free_eofblocks(). If we
1613 * update the on-disk file size and then the system
1614 * crashes before the contents of the file are
1615 * flushed to disk then the files may be full of
1616 * holes (i.e. the NULL files bug).
1618 if (ip->i_size != new_size) {
1619 ip->i_d.di_size = new_size;
1620 ip->i_size = new_size;
1621 xfs_trans_log_inode(ntp, ip, XFS_ILOG_CORE);
1625 ASSERT(!(mp->m_flags & XFS_MOUNT_WSYNC));
1626 if (ip->i_d.di_anextents > 0)
1627 xfs_trans_set_sync(ntp);
1629 ASSERT(fork == XFS_DATA_FORK ||
1630 (fork == XFS_ATTR_FORK &&
1631 ((sync && !(mp->m_flags & XFS_MOUNT_WSYNC)) ||
1632 (sync == 0 && (mp->m_flags & XFS_MOUNT_WSYNC)))));
1635 * Since it is possible for space to become allocated beyond
1636 * the end of the file (in a crash where the space is allocated
1637 * but the inode size is not yet updated), simply remove any
1638 * blocks which show up between the new EOF and the maximum
1639 * possible file size. If the first block to be removed is
1640 * beyond the maximum file size (ie it is the same as last_block),
1641 * then there is nothing to do.
1643 last_block = XFS_B_TO_FSB(mp, (xfs_ufsize_t)XFS_MAXIOFFSET(mp));
1644 ASSERT(first_unmap_block <= last_block);
1646 if (last_block == first_unmap_block) {
1649 unmap_len = last_block - first_unmap_block + 1;
1653 * Free up to XFS_ITRUNC_MAX_EXTENTS extents. xfs_bunmapi()
1654 * will tell us whether it freed the entire range or
1655 * not. If this is a synchronous mount (wsync),
1656 * then we can tell bunmapi to keep all the
1657 * transactions asynchronous since the unlink
1658 * transaction that made this inode inactive has
1659 * already hit the disk. There's no danger of
1660 * the freed blocks being reused, there being a
1661 * crash, and the reused blocks suddenly reappearing
1662 * in this file with garbage in them once recovery runs.
1665 XFS_BMAP_INIT(&free_list, &first_block);
1666 error = xfs_bunmapi(ntp, ip,
1667 first_unmap_block, unmap_len,
1668 XFS_BMAPI_AFLAG(fork) |
1669 (sync ? 0 : XFS_BMAPI_ASYNC),
1670 XFS_ITRUNC_MAX_EXTENTS,
1671 &first_block, &free_list,
1675 * If the bunmapi call encounters an error,
1676 * return to the caller where the transaction
1677 * can be properly aborted. We just need to
1678 * make sure we're not holding any resources
1679 * that we were not when we came in.
1681 xfs_bmap_cancel(&free_list);
1686 * Duplicate the transaction that has the permanent
1687 * reservation and commit the old transaction.
1689 error = xfs_bmap_finish(tp, &free_list, &committed);
1693 * If the bmap finish call encounters an error,
1694 * return to the caller where the transaction
1695 * can be properly aborted. We just need to
1696 * make sure we're not holding any resources
1697 * that we were not when we came in.
1699 * Aborting from this point might lose some
1700 * blocks in the file system, but oh well.
1702 xfs_bmap_cancel(&free_list);
1705 * If the passed in transaction committed
1706 * in xfs_bmap_finish(), then we want to
1707 * add the inode to this one before returning.
1708 * This keeps things simple for the higher
1709 * level code, because it always knows that
1710 * the inode is locked and held in the
1711 * transaction that returns to it whether
1712 * errors occur or not. We don't mark the
1713 * inode dirty so that this transaction can
1714 * be easily aborted if possible.
1716 xfs_trans_ijoin(ntp, ip,
1717 XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
1718 xfs_trans_ihold(ntp, ip);
1725 * The first xact was committed,
1726 * so add the inode to the new one.
1727 * Mark it dirty so it will be logged
1728 * and moved forward in the log as
1729 * part of every commit.
1731 xfs_trans_ijoin(ntp, ip,
1732 XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
1733 xfs_trans_ihold(ntp, ip);
1734 xfs_trans_log_inode(ntp, ip, XFS_ILOG_CORE);
1736 ntp = xfs_trans_dup(ntp);
1737 (void) xfs_trans_commit(*tp, 0);
1739 error = xfs_trans_reserve(ntp, 0, XFS_ITRUNCATE_LOG_RES(mp), 0,
1740 XFS_TRANS_PERM_LOG_RES,
1741 XFS_ITRUNCATE_LOG_COUNT);
1743 * Add the inode being truncated to the next chained transaction.
1746 xfs_trans_ijoin(ntp, ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
1747 xfs_trans_ihold(ntp, ip);
1752 * Only update the size in the case of the data fork, but
1753 * always re-log the inode so that our permanent transaction
1754 * can keep on rolling it forward in the log.
1756 if (fork == XFS_DATA_FORK) {
1757 xfs_isize_check(mp, ip, new_size);
1759 * If we are not changing the file size then do
1760 * not update the on-disk file size - we may be
1761 * called from xfs_inactive_free_eofblocks(). If we
1762 * update the on-disk file size and then the system
1763 * crashes before the contents of the file are
1764 * flushed to disk then the files may be full of
1765 * holes (i.e. the NULL files bug).
1767 if (ip->i_size != new_size) {
1768 ip->i_d.di_size = new_size;
1769 ip->i_size = new_size;
1772 xfs_trans_log_inode(ntp, ip, XFS_ILOG_CORE);
1773 ASSERT((new_size != 0) ||
1774 (fork == XFS_ATTR_FORK) ||
1775 (ip->i_delayed_blks == 0));
1776 ASSERT((new_size != 0) ||
1777 (fork == XFS_ATTR_FORK) ||
1778 (ip->i_d.di_nextents == 0));
1779 xfs_itrunc_trace(XFS_ITRUNC_FINISH2, ip, 0, new_size, 0, 0);
1787 * Do the first part of growing a file: zero any data in the last
1788 * block that is beyond the old EOF. We need to do this before
1789 * the inode is joined to the transaction to modify the i_size.
1790 * That way we can drop the inode lock and call into the buffer
1791 * cache to get the buffer mapping the EOF.
1796 xfs_fsize_t new_size,
1799 ASSERT(ismrlocked(&(ip->i_lock), MR_UPDATE) != 0);
1800 ASSERT(ismrlocked(&(ip->i_iolock), MR_UPDATE) != 0);
1801 ASSERT(new_size > ip->i_size);
1804 * Zero any pages that may have been created by
1805 * xfs_write_file() beyond the end of the file
1806 * and any blocks between the old and new file sizes.
1808 return xfs_zero_eof(ip, new_size, ip->i_size);
1814 * This routine is called to extend the size of a file.
1815 * The inode must have both the iolock and the ilock locked
1816 * for update and it must be a part of the current transaction.
1817 * The xfs_igrow_start() function must have been called previously.
1818 * If the change_flag is not zero, the inode change timestamp will be updated.
1825 xfs_fsize_t new_size,
1828 ASSERT(ismrlocked(&(ip->i_lock), MR_UPDATE) != 0);
1829 ASSERT(ismrlocked(&(ip->i_iolock), MR_UPDATE) != 0);
1830 ASSERT(ip->i_transp == tp);
1831 ASSERT(new_size > ip->i_size);
1834 * Update the file size. Update the inode change timestamp
1835 * if change_flag set.
1837 ip->i_d.di_size = new_size;
1838 ip->i_size = new_size;
1840 xfs_ichgtime(ip, XFS_ICHGTIME_CHG);
1841 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
1847 * This is called when the inode's link count goes to 0.
1848 * We place the on-disk inode on a list in the AGI. It
1849 * will be pulled from this list when the inode is freed.
1861 xfs_agnumber_t agno;
1862 xfs_daddr_t agdaddr;
1869 ASSERT(ip->i_d.di_nlink == 0);
1870 ASSERT(ip->i_d.di_mode != 0);
1871 ASSERT(ip->i_transp == tp);
1875 agno = XFS_INO_TO_AGNO(mp, ip->i_ino);
1876 agdaddr = XFS_AG_DADDR(mp, agno, XFS_AGI_DADDR(mp));
1879 * Get the agi buffer first. It ensures lock ordering on the list.
1882 error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, agdaddr,
1883 XFS_FSS_TO_BB(mp, 1), 0, &agibp);
1888 * Validate the magic number of the agi block.
1890 agi = XFS_BUF_TO_AGI(agibp);
1892 be32_to_cpu(agi->agi_magicnum) == XFS_AGI_MAGIC &&
1893 XFS_AGI_GOOD_VERSION(be32_to_cpu(agi->agi_versionnum));
1894 if (unlikely(XFS_TEST_ERROR(!agi_ok, mp, XFS_ERRTAG_IUNLINK,
1895 XFS_RANDOM_IUNLINK))) {
1896 XFS_CORRUPTION_ERROR("xfs_iunlink", XFS_ERRLEVEL_LOW, mp, agi);
1897 xfs_trans_brelse(tp, agibp);
1898 return XFS_ERROR(EFSCORRUPTED);
1901 * Get the index into the agi hash table for the
1902 * list this inode will go on.
1904 agino = XFS_INO_TO_AGINO(mp, ip->i_ino);
1906 bucket_index = agino % XFS_AGI_UNLINKED_BUCKETS;
1907 ASSERT(agi->agi_unlinked[bucket_index]);
1908 ASSERT(be32_to_cpu(agi->agi_unlinked[bucket_index]) != agino);
1910 if (be32_to_cpu(agi->agi_unlinked[bucket_index]) != NULLAGINO) {
1912 * There is already another inode in the bucket we need
1913 * to add ourselves to. Add us at the front of the list.
1914 * Here we put the head pointer into our next pointer,
1915 * and then we fall through to point the head at us.
1917 error = xfs_itobp(mp, tp, ip, &dip, &ibp, 0, 0, XFS_BUF_LOCK);
1921 ASSERT(be32_to_cpu(dip->di_next_unlinked) == NULLAGINO);
1922 /* both on-disk, don't endian flip twice */
1923 dip->di_next_unlinked = agi->agi_unlinked[bucket_index];
1924 offset = ip->i_boffset +
1925 offsetof(xfs_dinode_t, di_next_unlinked);
1926 xfs_trans_inode_buf(tp, ibp);
1927 xfs_trans_log_buf(tp, ibp, offset,
1928 (offset + sizeof(xfs_agino_t) - 1));
1929 xfs_inobp_check(mp, ibp);
1933 * Point the bucket head pointer at the inode being inserted.
1936 agi->agi_unlinked[bucket_index] = cpu_to_be32(agino);
1937 offset = offsetof(xfs_agi_t, agi_unlinked) +
1938 (sizeof(xfs_agino_t) * bucket_index);
1939 xfs_trans_log_buf(tp, agibp, offset,
1940 (offset + sizeof(xfs_agino_t) - 1));
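/*
 * Example (illustration only): the unlinked lists are hashed by
 * agino % XFS_AGI_UNLINKED_BUCKETS, so with the usual 64 buckets an inode
 * whose AG-relative number is 130 goes on bucket 2, pushed onto the front of
 * that singly linked, NULLAGINO-terminated chain.
 */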
1945 * Pull the on-disk inode from the AGI unlinked list.
1958 xfs_agnumber_t agno;
1959 xfs_daddr_t agdaddr;
1961 xfs_agino_t next_agino;
1962 xfs_buf_t *last_ibp;
1963 xfs_dinode_t *last_dip = NULL;
1965 int offset, last_offset = 0;
1970 * First pull the on-disk inode from the AGI unlinked list.
1974 agno = XFS_INO_TO_AGNO(mp, ip->i_ino);
1975 agdaddr = XFS_AG_DADDR(mp, agno, XFS_AGI_DADDR(mp));
1978 * Get the agi buffer first. It ensures lock ordering on the list.
1981 error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, agdaddr,
1982 XFS_FSS_TO_BB(mp, 1), 0, &agibp);
1985 "xfs_iunlink_remove: xfs_trans_read_buf() returned an error %d on %s. Returning error.",
1986 error, mp->m_fsname);
1990 * Validate the magic number of the agi block.
1992 agi = XFS_BUF_TO_AGI(agibp);
1994 be32_to_cpu(agi->agi_magicnum) == XFS_AGI_MAGIC &&
1995 XFS_AGI_GOOD_VERSION(be32_to_cpu(agi->agi_versionnum));
1996 if (unlikely(XFS_TEST_ERROR(!agi_ok, mp, XFS_ERRTAG_IUNLINK_REMOVE,
1997 XFS_RANDOM_IUNLINK_REMOVE))) {
1998 XFS_CORRUPTION_ERROR("xfs_iunlink_remove", XFS_ERRLEVEL_LOW,
2000 xfs_trans_brelse(tp, agibp);
2002 "xfs_iunlink_remove: XFS_TEST_ERROR() returned an error on %s. Returning EFSCORRUPTED.",
2004 return XFS_ERROR(EFSCORRUPTED);
2007 * Get the index into the agi hash table for the
2008 * list this inode will go on.
2010 agino = XFS_INO_TO_AGINO(mp, ip->i_ino);
2012 bucket_index = agino % XFS_AGI_UNLINKED_BUCKETS;
2013 ASSERT(be32_to_cpu(agi->agi_unlinked[bucket_index]) != NULLAGINO);
2014 ASSERT(agi->agi_unlinked[bucket_index]);
2016 if (be32_to_cpu(agi->agi_unlinked[bucket_index]) == agino) {
2018 * We're at the head of the list. Get the inode's
2019 * on-disk buffer to see if there is anyone after us
2020 * on the list. Only modify our next pointer if it
2021 * is not already NULLAGINO. This saves us the overhead
2022 * of dealing with the buffer when there is no need to change it.
2025 error = xfs_itobp(mp, tp, ip, &dip, &ibp, 0, 0, XFS_BUF_LOCK);
2028 "xfs_iunlink_remove: xfs_itobp() returned an error %d on %s. Returning error.",
2029 error, mp->m_fsname);
2032 next_agino = be32_to_cpu(dip->di_next_unlinked);
2033 ASSERT(next_agino != 0);
2034 if (next_agino != NULLAGINO) {
2035 dip->di_next_unlinked = cpu_to_be32(NULLAGINO);
2036 offset = ip->i_boffset +
2037 offsetof(xfs_dinode_t, di_next_unlinked);
2038 xfs_trans_inode_buf(tp, ibp);
2039 xfs_trans_log_buf(tp, ibp, offset,
2040 (offset + sizeof(xfs_agino_t) - 1));
2041 xfs_inobp_check(mp, ibp);
2043 xfs_trans_brelse(tp, ibp);
2046 * Point the bucket head pointer at the next inode.
2048 ASSERT(next_agino != 0);
2049 ASSERT(next_agino != agino);
2050 agi->agi_unlinked[bucket_index] = cpu_to_be32(next_agino);
2051 offset = offsetof(xfs_agi_t, agi_unlinked) +
2052 (sizeof(xfs_agino_t) * bucket_index);
2053 xfs_trans_log_buf(tp, agibp, offset,
2054 (offset + sizeof(xfs_agino_t) - 1));
2057 * We need to search the list for the inode being freed.
2059 next_agino = be32_to_cpu(agi->agi_unlinked[bucket_index]);
2061 while (next_agino != agino) {
2063 * If the last inode wasn't the one pointing to
2064 * us, then release its buffer since we're not
2065 * going to do anything with it.
2067 if (last_ibp != NULL) {
2068 xfs_trans_brelse(tp, last_ibp);
2070 next_ino = XFS_AGINO_TO_INO(mp, agno, next_agino);
2071 error = xfs_inotobp(mp, tp, next_ino, &last_dip,
2072 &last_ibp, &last_offset);
2075 "xfs_iunlink_remove: xfs_inotobp() returned an error %d on %s. Returning error.",
2076 error, mp->m_fsname);
2079 next_agino = be32_to_cpu(last_dip->di_next_unlinked);
2080 ASSERT(next_agino != NULLAGINO);
2081 ASSERT(next_agino != 0);
2084 * Now last_ibp points to the buffer previous to us on
2085 * the unlinked list. Pull us from the list.
2087 error = xfs_itobp(mp, tp, ip, &dip, &ibp, 0, 0, XFS_BUF_LOCK);
2090 "xfs_iunlink_remove: xfs_itobp() returned an error %d on %s. Returning error.",
2091 error, mp->m_fsname);
2094 next_agino = be32_to_cpu(dip->di_next_unlinked);
2095 ASSERT(next_agino != 0);
2096 ASSERT(next_agino != agino);
2097 if (next_agino != NULLAGINO) {
2098 dip->di_next_unlinked = cpu_to_be32(NULLAGINO);
2099 offset = ip->i_boffset +
2100 offsetof(xfs_dinode_t, di_next_unlinked);
2101 xfs_trans_inode_buf(tp, ibp);
2102 xfs_trans_log_buf(tp, ibp, offset,
2103 (offset + sizeof(xfs_agino_t) - 1));
2104 xfs_inobp_check(mp, ibp);
2106 xfs_trans_brelse(tp, ibp);
2109 * Point the previous inode on the list to the next inode.
2111 last_dip->di_next_unlinked = cpu_to_be32(next_agino);
2112 ASSERT(next_agino != 0);
2113 offset = last_offset + offsetof(xfs_dinode_t, di_next_unlinked);
2114 xfs_trans_inode_buf(tp, last_ibp);
2115 xfs_trans_log_buf(tp, last_ibp, offset,
2116 (offset + sizeof(xfs_agino_t) - 1));
2117 xfs_inobp_check(mp, last_ibp);
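/*
 * An inode is considered clean here if its log item has no fields
 * pending in ili_format.ilf_fields and no in-core updates are waiting
 * to be flushed (i_update_core == 0).
 */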
2122 STATIC_INLINE int xfs_inode_clean(xfs_inode_t *ip)
2124 return (((ip->i_itemp == NULL) ||
2125 !(ip->i_itemp->ili_format.ilf_fields & XFS_ILOG_ALL)) &&
2126 (ip->i_update_core == 0));
2131 xfs_inode_t *free_ip,
2135 xfs_mount_t *mp = free_ip->i_mount;
2136 int blks_per_cluster;
2139 int i, j, found, pre_flushed;
2142 xfs_inode_t *ip, **ip_found;
2143 xfs_inode_log_item_t *iip;
2144 xfs_log_item_t *lip;
2145 xfs_perag_t *pag = xfs_get_perag(mp, inum);
2147 if (mp->m_sb.sb_blocksize >= XFS_INODE_CLUSTER_SIZE(mp)) {
2148 blks_per_cluster = 1;
2149 ninodes = mp->m_sb.sb_inopblock;
2150 nbufs = XFS_IALLOC_BLOCKS(mp);
2152 blks_per_cluster = XFS_INODE_CLUSTER_SIZE(mp) /
2153 mp->m_sb.sb_blocksize;
2154 ninodes = blks_per_cluster * mp->m_sb.sb_inopblock;
2155 nbufs = XFS_IALLOC_BLOCKS(mp) / blks_per_cluster;
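/*
 * Worked example (geometry is illustrative only): with 4k filesystem
 * blocks, a 16k inode cluster and 256-byte inodes, sb_inopblock is 16,
 * so blks_per_cluster = 4 and ninodes = 64, and each pass of the loop
 * below stales one 4-block cluster buffer.
 */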
2158 ip_found = kmem_alloc(ninodes * sizeof(xfs_inode_t *), KM_NOFS);
2160 for (j = 0; j < nbufs; j++, inum += ninodes) {
2161 blkno = XFS_AGB_TO_DADDR(mp, XFS_INO_TO_AGNO(mp, inum),
2162 XFS_INO_TO_AGBNO(mp, inum));
2166 * Look for each inode in memory and attempt to lock it;
2167 * we can be racing with flush and tail pushing here.
2168 * Any inode we get the locks on is added to an array of
2169 * inode items to process later.
2171 * To get the buffer lock, we could beat a flush
2172 * or tail pushing thread to the lock here, in which
2173 * case they will go looking for the inode buffer
2174 * and fail, so we need some other form of interlock
2178 for (i = 0; i < ninodes; i++) {
2179 read_lock(&pag->pag_ici_lock);
2180 ip = radix_tree_lookup(&pag->pag_ici_root,
2181 XFS_INO_TO_AGINO(mp, (inum + i)));
2183 /* Inode not in memory or we found it already,
2186 if (!ip || xfs_iflags_test(ip, XFS_ISTALE)) {
2187 read_unlock(&pag->pag_ici_lock);
2191 if (xfs_inode_clean(ip)) {
2192 read_unlock(&pag->pag_ici_lock);
2196 /* If we can get the locks then add it to the
2197 * list, otherwise by the time we get the bp lock
2198 * below it will already be attached to the
2202 /* This inode will already be locked - by us, let's
2206 if (ip == free_ip) {
2207 if (xfs_iflock_nowait(ip)) {
2208 xfs_iflags_set(ip, XFS_ISTALE);
2209 if (xfs_inode_clean(ip)) {
2212 ip_found[found++] = ip;
2215 read_unlock(&pag->pag_ici_lock);
2219 if (xfs_ilock_nowait(ip, XFS_ILOCK_EXCL)) {
2220 if (xfs_iflock_nowait(ip)) {
2221 xfs_iflags_set(ip, XFS_ISTALE);
2223 if (xfs_inode_clean(ip)) {
2225 xfs_iunlock(ip, XFS_ILOCK_EXCL);
2227 ip_found[found++] = ip;
2230 xfs_iunlock(ip, XFS_ILOCK_EXCL);
2233 read_unlock(&pag->pag_ici_lock);
2236 bp = xfs_trans_get_buf(tp, mp->m_ddev_targp, blkno,
2237 mp->m_bsize * blks_per_cluster,
2241 lip = XFS_BUF_FSPRIVATE(bp, xfs_log_item_t *);
2243 if (lip->li_type == XFS_LI_INODE) {
2244 iip = (xfs_inode_log_item_t *)lip;
2245 ASSERT(iip->ili_logged == 1);
2246 lip->li_cb = (void(*)(xfs_buf_t*,xfs_log_item_t*)) xfs_istale_done;
2247 spin_lock(&mp->m_ail_lock);
2248 iip->ili_flush_lsn = iip->ili_item.li_lsn;
2249 spin_unlock(&mp->m_ail_lock);
2250 xfs_iflags_set(iip->ili_inode, XFS_ISTALE);
2253 lip = lip->li_bio_list;
2256 for (i = 0; i < found; i++) {
2261 ip->i_update_core = 0;
2263 xfs_iunlock(ip, XFS_ILOCK_EXCL);
2267 iip->ili_last_fields = iip->ili_format.ilf_fields;
2268 iip->ili_format.ilf_fields = 0;
2269 iip->ili_logged = 1;
2270 spin_lock(&mp->m_ail_lock);
2271 iip->ili_flush_lsn = iip->ili_item.li_lsn;
2272 spin_unlock(&mp->m_ail_lock);
2274 xfs_buf_attach_iodone(bp,
2275 (void(*)(xfs_buf_t*,xfs_log_item_t*))
2276 xfs_istale_done, (xfs_log_item_t *)iip);
2277 if (ip != free_ip) {
2278 xfs_iunlock(ip, XFS_ILOCK_EXCL);
2282 if (found || pre_flushed)
2283 xfs_trans_stale_inode_buf(tp, bp);
2284 xfs_trans_binval(tp, bp);
2287 kmem_free(ip_found, ninodes * sizeof(xfs_inode_t *));
2288 xfs_put_perag(mp, pag);
2292 * This is called to return an inode to the inode free list.
2293 * The inode should already be truncated to 0 length and have
2294 * no pages associated with it. This routine also assumes that
2295 * the inode is already a part of the transaction.
2297 * The on-disk copy of the inode will have been added to the list
2298 * of unlinked inodes in the AGI. We need to remove the inode from
2299 * that list atomically with respect to freeing it here.
2305 xfs_bmap_free_t *flist)
2309 xfs_ino_t first_ino;
2313 ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE));
2314 ASSERT(ip->i_transp == tp);
2315 ASSERT(ip->i_d.di_nlink == 0);
2316 ASSERT(ip->i_d.di_nextents == 0);
2317 ASSERT(ip->i_d.di_anextents == 0);
2318 ASSERT((ip->i_d.di_size == 0 && ip->i_size == 0) ||
2319 ((ip->i_d.di_mode & S_IFMT) != S_IFREG));
2320 ASSERT(ip->i_d.di_nblocks == 0);
2323 * Pull the on-disk inode from the AGI unlinked list.
2325 error = xfs_iunlink_remove(tp, ip);
2330 error = xfs_difree(tp, ip->i_ino, flist, &delete, &first_ino);
2334 ip->i_d.di_mode = 0; /* mark incore inode as free */
2335 ip->i_d.di_flags = 0;
2336 ip->i_d.di_dmevmask = 0;
2337 ip->i_d.di_forkoff = 0; /* mark the attr fork not in use */
2338 ip->i_df.if_ext_max =
2339 XFS_IFORK_DSIZE(ip) / (uint)sizeof(xfs_bmbt_rec_t);
2340 ip->i_d.di_format = XFS_DINODE_FMT_EXTENTS;
2341 ip->i_d.di_aformat = XFS_DINODE_FMT_EXTENTS;
2343 * Bump the generation count so no one will be confused
2344 * by reincarnations of this inode.
2348 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
2350 error = xfs_itobp(ip->i_mount, tp, ip, &dip, &ibp, 0, 0, XFS_BUF_LOCK);
2355 * Clear the on-disk di_mode. This is to prevent xfs_bulkstat
2356 * from picking up this inode when it is reclaimed (its incore state
2357 * initialized but not flushed to disk yet). The in-core di_mode is
2358 * already cleared and a corresponding transaction logged.
2359 * The hack here just synchronizes the in-core to on-disk
2360 * di_mode value in advance before the actual inode sync to disk.
2361 * This is OK because the inode is already unlinked and would never
2362 * change its di_mode again for this inode generation.
2363 * This is a temporary hack that would require a proper fix
2366 dip->di_core.di_mode = 0;
2369 xfs_ifree_cluster(ip, tp, first_ino);
2376 * Reallocate the space for if_broot based on the number of records
2377 * being added or deleted as indicated in rec_diff. Move the records
2378 * and pointers in if_broot to fit the new size. When shrinking this
2379 * will eliminate holes between the records and pointers created by
2380 * the caller. When growing this will create holes to be filled in
2383 * The caller must not request to add more records than would fit in
2384 * the on-disk inode root. If the if_broot is currently NULL, then
2385 * if we are adding records one will be allocated. The caller must also
2386 * not request that the number of records go below zero, although
2387 * it can go to zero.
2389 * ip -- the inode whose if_broot area is changing
2390 * rec_diff -- the change in the number of records, positive or negative,
2391 * requested for the if_broot array.
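*
* A hedged usage sketch (assuming the surrounding routine is
* xfs_iroot_realloc(ip, rec_diff, whichfork); the call sites below are
* illustrative only). Growing the data fork root by one record and later
* shrinking it by two would look like:
*	xfs_iroot_realloc(ip, 1, XFS_DATA_FORK);
*	xfs_iroot_realloc(ip, -2, XFS_DATA_FORK);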
2401 xfs_bmbt_block_t *new_broot;
2408 * Handle the degenerate case quietly.
2410 if (rec_diff == 0) {
2414 ifp = XFS_IFORK_PTR(ip, whichfork);
2417 * If there wasn't any memory allocated before, just
2418 * allocate it now and get out.
2420 if (ifp->if_broot_bytes == 0) {
2421 new_size = (size_t)XFS_BMAP_BROOT_SPACE_CALC(rec_diff);
2422 ifp->if_broot = (xfs_bmbt_block_t*)kmem_alloc(new_size,
2424 ifp->if_broot_bytes = (int)new_size;
2429 * If there is already an existing if_broot, then we need
2430 * to realloc() it and shift the pointers to their new
2431 * location. The records don't change location because
2432 * they are kept butted up against the btree block header.
2434 cur_max = XFS_BMAP_BROOT_MAXRECS(ifp->if_broot_bytes);
2435 new_max = cur_max + rec_diff;
2436 new_size = (size_t)XFS_BMAP_BROOT_SPACE_CALC(new_max);
2437 ifp->if_broot = (xfs_bmbt_block_t *)
2438 kmem_realloc(ifp->if_broot,
2440 (size_t)XFS_BMAP_BROOT_SPACE_CALC(cur_max), /* old size */
2442 op = (char *)XFS_BMAP_BROOT_PTR_ADDR(ifp->if_broot, 1,
2443 ifp->if_broot_bytes);
2444 np = (char *)XFS_BMAP_BROOT_PTR_ADDR(ifp->if_broot, 1,
2446 ifp->if_broot_bytes = (int)new_size;
2447 ASSERT(ifp->if_broot_bytes <=
2448 XFS_IFORK_SIZE(ip, whichfork) + XFS_BROOT_SIZE_ADJ);
2449 memmove(np, op, cur_max * (uint)sizeof(xfs_dfsbno_t));
2454 * rec_diff is less than 0. In this case, we are shrinking the
2455 * if_broot buffer. It must already exist. If we go to zero
2456 * records, just get rid of the root and clear the status bit.
2458 ASSERT((ifp->if_broot != NULL) && (ifp->if_broot_bytes > 0));
2459 cur_max = XFS_BMAP_BROOT_MAXRECS(ifp->if_broot_bytes);
2460 new_max = cur_max + rec_diff;
2461 ASSERT(new_max >= 0);
2463 new_size = (size_t)XFS_BMAP_BROOT_SPACE_CALC(new_max);
2467 new_broot = (xfs_bmbt_block_t *)kmem_alloc(new_size, KM_SLEEP);
2469 * First copy over the btree block header.
2471 memcpy(new_broot, ifp->if_broot, sizeof(xfs_bmbt_block_t));
2474 ifp->if_flags &= ~XFS_IFBROOT;
2478 * Only copy the records and pointers if there are any.
2482 * First copy the records.
2484 op = (char *)XFS_BMAP_BROOT_REC_ADDR(ifp->if_broot, 1,
2485 ifp->if_broot_bytes);
2486 np = (char *)XFS_BMAP_BROOT_REC_ADDR(new_broot, 1,
2488 memcpy(np, op, new_max * (uint)sizeof(xfs_bmbt_rec_t));
2491 * Then copy the pointers.
2493 op = (char *)XFS_BMAP_BROOT_PTR_ADDR(ifp->if_broot, 1,
2494 ifp->if_broot_bytes);
2495 np = (char *)XFS_BMAP_BROOT_PTR_ADDR(new_broot, 1,
2497 memcpy(np, op, new_max * (uint)sizeof(xfs_dfsbno_t));
2499 kmem_free(ifp->if_broot, ifp->if_broot_bytes);
2500 ifp->if_broot = new_broot;
2501 ifp->if_broot_bytes = (int)new_size;
2502 ASSERT(ifp->if_broot_bytes <=
2503 XFS_IFORK_SIZE(ip, whichfork) + XFS_BROOT_SIZE_ADJ);
2509 * This is called when the amount of space needed for if_data
2510 * is increased or decreased. The change in size is indicated by
2511 * the number of bytes that need to be added or deleted in the
2512 * byte_diff parameter.
2514 * If the amount of space needed has decreased below the size of the
2515 * inline buffer, then switch to using the inline buffer. Otherwise,
2516 * use kmem_realloc() or kmem_alloc() to adjust the size of the buffer
2517 * to what is needed.
2519 * ip -- the inode whose if_data area is changing
2520 * byte_diff -- the change in the number of bytes, positive or negative,
2521 * requested for the if_data array.
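*
* For example (callers are illustrative): growing the data held in a
* local-format fork, such as a shortform directory or a symlink target,
* by three bytes is requested with a byte_diff of +3; truncating it
* passes a negative byte_diff.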
2533 if (byte_diff == 0) {
2537 ifp = XFS_IFORK_PTR(ip, whichfork);
2538 new_size = (int)ifp->if_bytes + byte_diff;
2539 ASSERT(new_size >= 0);
2541 if (new_size == 0) {
2542 if (ifp->if_u1.if_data != ifp->if_u2.if_inline_data) {
2543 kmem_free(ifp->if_u1.if_data, ifp->if_real_bytes);
2545 ifp->if_u1.if_data = NULL;
2547 } else if (new_size <= sizeof(ifp->if_u2.if_inline_data)) {
2549 * If the valid extents/data can fit in if_inline_ext/data,
2550 * copy them from the malloc'd vector and free it.
2552 if (ifp->if_u1.if_data == NULL) {
2553 ifp->if_u1.if_data = ifp->if_u2.if_inline_data;
2554 } else if (ifp->if_u1.if_data != ifp->if_u2.if_inline_data) {
2555 ASSERT(ifp->if_real_bytes != 0);
2556 memcpy(ifp->if_u2.if_inline_data, ifp->if_u1.if_data,
2558 kmem_free(ifp->if_u1.if_data, ifp->if_real_bytes);
2559 ifp->if_u1.if_data = ifp->if_u2.if_inline_data;
2564 * Stuck with malloc/realloc.
2565 * For inline data, the underlying buffer must be
2566 * a multiple of 4 bytes in size so that it can be
2567 * logged and stay on word boundaries. We enforce
2570 real_size = roundup(new_size, 4);
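/*
 * e.g. a new_size of 10 bytes becomes a 12-byte allocation so the
 * logged region stays word aligned.
 */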
2571 if (ifp->if_u1.if_data == NULL) {
2572 ASSERT(ifp->if_real_bytes == 0);
2573 ifp->if_u1.if_data = kmem_alloc(real_size, KM_SLEEP);
2574 } else if (ifp->if_u1.if_data != ifp->if_u2.if_inline_data) {
2576 * Only do the realloc if the underlying size
2577 * is really changing.
2579 if (ifp->if_real_bytes != real_size) {
2580 ifp->if_u1.if_data =
2581 kmem_realloc(ifp->if_u1.if_data,
2587 ASSERT(ifp->if_real_bytes == 0);
2588 ifp->if_u1.if_data = kmem_alloc(real_size, KM_SLEEP);
2589 memcpy(ifp->if_u1.if_data, ifp->if_u2.if_inline_data,
2593 ifp->if_real_bytes = real_size;
2594 ifp->if_bytes = new_size;
2595 ASSERT(ifp->if_bytes <= XFS_IFORK_SIZE(ip, whichfork));
2602 * Map inode to disk block and offset.
2604 * mp -- the mount point structure for the current file system
2605 * tp -- the current transaction
2606 * ino -- the inode number of the inode to be located
2607 * imap -- this structure is filled in with the information necessary
2608 * to retrieve the given inode from disk
2609 * flags -- flags to pass to xfs_dilocate indicating whether or not
2610 * lookups in the inode btree were OK or not
2620 xfs_fsblock_t fsbno;
2625 fsbno = imap->im_blkno ?
2626 XFS_DADDR_TO_FSB(mp, imap->im_blkno) : NULLFSBLOCK;
2627 error = xfs_dilocate(mp, tp, ino, &fsbno, &len, &off, flags);
2631 imap->im_blkno = XFS_FSB_TO_DADDR(mp, fsbno);
2632 imap->im_len = XFS_FSB_TO_BB(mp, len);
2633 imap->im_agblkno = XFS_FSB_TO_AGBNO(mp, fsbno);
2634 imap->im_ioffset = (ushort)off;
2635 imap->im_boffset = (ushort)(off << mp->m_sb.sb_inodelog);
2638 * If the inode number maps to a block outside the bounds
2639 * of the file system then return an error rather than calling
2640 * read_buf and panicking when we get an error from the
2643 if ((imap->im_blkno + imap->im_len) >
2644 XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks)) {
2645 xfs_fs_cmn_err(CE_ALERT, mp, "xfs_imap: "
2646 "(imap->im_blkno (0x%llx) + imap->im_len (0x%llx)) > "
2647 " XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks) (0x%llx)",
2648 (unsigned long long) imap->im_blkno,
2649 (unsigned long long) imap->im_len,
2650 XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks));
2663 ifp = XFS_IFORK_PTR(ip, whichfork);
2664 if (ifp->if_broot != NULL) {
2665 kmem_free(ifp->if_broot, ifp->if_broot_bytes);
2666 ifp->if_broot = NULL;
2670 * If the format is local, then we can't have an extents
2671 * array so just look for an inline data array. If we're
2672 * not local then we may or may not have an extents list,
2673 * so check and free it up if we do.
2675 if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL) {
2676 if ((ifp->if_u1.if_data != ifp->if_u2.if_inline_data) &&
2677 (ifp->if_u1.if_data != NULL)) {
2678 ASSERT(ifp->if_real_bytes != 0);
2679 kmem_free(ifp->if_u1.if_data, ifp->if_real_bytes);
2680 ifp->if_u1.if_data = NULL;
2681 ifp->if_real_bytes = 0;
2683 } else if ((ifp->if_flags & XFS_IFEXTENTS) &&
2684 ((ifp->if_flags & XFS_IFEXTIREC) ||
2685 ((ifp->if_u1.if_extents != NULL) &&
2686 (ifp->if_u1.if_extents != ifp->if_u2.if_inline_ext)))) {
2687 ASSERT(ifp->if_real_bytes != 0);
2688 xfs_iext_destroy(ifp);
2690 ASSERT(ifp->if_u1.if_extents == NULL ||
2691 ifp->if_u1.if_extents == ifp->if_u2.if_inline_ext);
2692 ASSERT(ifp->if_real_bytes == 0);
2693 if (whichfork == XFS_ATTR_FORK) {
2694 kmem_zone_free(xfs_ifork_zone, ip->i_afp);
2700 * This is called to free all the memory associated with an inode.
2701 * It must free the inode itself and any buffers allocated for
2702 * if_extents/if_data and if_broot. It must also free the lock
2703 * associated with the inode.
2709 switch (ip->i_d.di_mode & S_IFMT) {
2713 xfs_idestroy_fork(ip, XFS_DATA_FORK);
2717 xfs_idestroy_fork(ip, XFS_ATTR_FORK);
2718 mrfree(&ip->i_lock);
2719 mrfree(&ip->i_iolock);
2720 freesema(&ip->i_flock);
2722 #ifdef XFS_INODE_TRACE
2723 ktrace_free(ip->i_trace);
2725 #ifdef XFS_BMAP_TRACE
2726 ktrace_free(ip->i_xtrace);
2728 #ifdef XFS_BMBT_TRACE
2729 ktrace_free(ip->i_btrace);
2732 ktrace_free(ip->i_rwtrace);
2734 #ifdef XFS_ILOCK_TRACE
2735 ktrace_free(ip->i_lock_trace);
2737 #ifdef XFS_DIR2_TRACE
2738 ktrace_free(ip->i_dir_trace);
2742 * Only if we are shutting down the fs will we see an
2743 * inode still in the AIL. If it is there, we should remove
2744 * it to prevent a use-after-free from occurring.
2746 xfs_mount_t *mp = ip->i_mount;
2747 xfs_log_item_t *lip = &ip->i_itemp->ili_item;
2749 ASSERT(((lip->li_flags & XFS_LI_IN_AIL) == 0) ||
2750 XFS_FORCED_SHUTDOWN(ip->i_mount));
2751 if (lip->li_flags & XFS_LI_IN_AIL) {
2752 spin_lock(&mp->m_ail_lock);
2753 if (lip->li_flags & XFS_LI_IN_AIL)
2754 xfs_trans_delete_ail(mp, lip);
2756 spin_unlock(&mp->m_ail_lock);
2758 xfs_inode_item_destroy(ip);
2760 kmem_zone_free(xfs_inode_zone, ip);
2765 * Increment the pin count of the given inode.
2766 * This value is protected by ipinlock spinlock in the mount structure.
2772 ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE));
2774 atomic_inc(&ip->i_pincount);
2778 * Decrement the pin count of the given inode, and wake up
2779 * anyone in xfs_iunpin_wait() if the count goes to 0. The
2780 * inode must have been previously pinned with a call to xfs_ipin().
2786 ASSERT(atomic_read(&ip->i_pincount) > 0);
2788 if (atomic_dec_and_test(&ip->i_pincount))
2789 wake_up(&ip->i_ipin_wait);
2793 * This is called to unpin an inode. It can be directed to wait or to return
2794 * immediately without waiting for the inode to be unpinned. The caller must
2795 * have the inode locked in at least shared mode so that the inode cannot be
2796 * subsequently pinned once someone is waiting for it to be unpinned.
2803 xfs_inode_log_item_t *iip = ip->i_itemp;
2805 ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE | MR_ACCESS));
2806 if (atomic_read(&ip->i_pincount) == 0)
2809 /* Give the log a push to start the unpinning I/O */
2810 xfs_log_force(ip->i_mount, (iip && iip->ili_last_lsn) ?
2811 iip->ili_last_lsn : 0, XFS_LOG_FORCE);
2813 wait_event(ip->i_ipin_wait, (atomic_read(&ip->i_pincount) == 0));
2820 __xfs_iunpin_wait(ip, 1);
2827 __xfs_iunpin_wait(ip, 0);
2832 * xfs_iextents_copy()
2834 * This is called to copy the REAL extents (as opposed to the delayed
2835 * allocation extents) from the inode into the given buffer. It
2836 * returns the number of bytes copied into the buffer.
2838 * If there are no delayed allocation extents, then we can just
2839 * memcpy() the extents into the buffer. Otherwise, we need to
2840 * examine each extent in turn and skip those which are delayed.
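*
* For example (counts are illustrative): an inode fork holding 5 incore
* extents of which 2 are delayed allocations copies 3 records into the
* buffer and returns 3 * sizeof(xfs_bmbt_rec_t). A typical call from the
* flush path below looks like:
*	xfs_iextents_copy(ip, (xfs_bmbt_rec_t *)cp, XFS_DATA_FORK);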
2852 xfs_fsblock_t start_block;
2854 ifp = XFS_IFORK_PTR(ip, whichfork);
2855 ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE|MR_ACCESS));
2856 ASSERT(ifp->if_bytes > 0);
2858 nrecs = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
2859 XFS_BMAP_TRACE_EXLIST(ip, nrecs, whichfork);
2863 * There are some delayed allocation extents in the
2864 * inode, so copy the extents one at a time and skip
2865 * the delayed ones. There must be at least one
2866 * non-delayed extent.
2869 for (i = 0; i < nrecs; i++) {
2870 xfs_bmbt_rec_host_t *ep = xfs_iext_get_ext(ifp, i);
2871 start_block = xfs_bmbt_get_startblock(ep);
2872 if (ISNULLSTARTBLOCK(start_block)) {
2874 * It's a delayed allocation extent, so skip it.
2879 /* Translate to on disk format */
2880 put_unaligned(cpu_to_be64(ep->l0), &dp->l0);
2881 put_unaligned(cpu_to_be64(ep->l1), &dp->l1);
2885 ASSERT(copied != 0);
2886 xfs_validate_extents(ifp, copied, XFS_EXTFMT_INODE(ip));
2888 return (copied * (uint)sizeof(xfs_bmbt_rec_t));
2892 * Each of the following cases stores data into the same region
2893 * of the on-disk inode, so only one of them can be valid at
2894 * any given time. While it is possible to have conflicting formats
2895 * and log flags, e.g. having XFS_ILOG_?DATA set when the fork is
2896 * in EXTENTS format, this can only happen when the fork has
2897 * changed formats after being modified but before being flushed.
2898 * In these cases, the format always takes precedence, because the
2899 * format indicates the current state of the fork.
2906 xfs_inode_log_item_t *iip,
2913 #ifdef XFS_TRANS_DEBUG
2916 static const short brootflag[2] =
2917 { XFS_ILOG_DBROOT, XFS_ILOG_ABROOT };
2918 static const short dataflag[2] =
2919 { XFS_ILOG_DDATA, XFS_ILOG_ADATA };
2920 static const short extflag[2] =
2921 { XFS_ILOG_DEXT, XFS_ILOG_AEXT };
2925 ifp = XFS_IFORK_PTR(ip, whichfork);
2927 * This can happen if we gave up in iformat in an error path,
2928 * for the attribute fork.
2931 ASSERT(whichfork == XFS_ATTR_FORK);
2934 cp = XFS_DFORK_PTR(dip, whichfork);
2936 switch (XFS_IFORK_FORMAT(ip, whichfork)) {
2937 case XFS_DINODE_FMT_LOCAL:
2938 if ((iip->ili_format.ilf_fields & dataflag[whichfork]) &&
2939 (ifp->if_bytes > 0)) {
2940 ASSERT(ifp->if_u1.if_data != NULL);
2941 ASSERT(ifp->if_bytes <= XFS_IFORK_SIZE(ip, whichfork));
2942 memcpy(cp, ifp->if_u1.if_data, ifp->if_bytes);
2946 case XFS_DINODE_FMT_EXTENTS:
2947 ASSERT((ifp->if_flags & XFS_IFEXTENTS) ||
2948 !(iip->ili_format.ilf_fields & extflag[whichfork]));
2949 ASSERT((xfs_iext_get_ext(ifp, 0) != NULL) ||
2950 (ifp->if_bytes == 0));
2951 ASSERT((xfs_iext_get_ext(ifp, 0) == NULL) ||
2952 (ifp->if_bytes > 0));
2953 if ((iip->ili_format.ilf_fields & extflag[whichfork]) &&
2954 (ifp->if_bytes > 0)) {
2955 ASSERT(XFS_IFORK_NEXTENTS(ip, whichfork) > 0);
2956 (void)xfs_iextents_copy(ip, (xfs_bmbt_rec_t *)cp,
2961 case XFS_DINODE_FMT_BTREE:
2962 if ((iip->ili_format.ilf_fields & brootflag[whichfork]) &&
2963 (ifp->if_broot_bytes > 0)) {
2964 ASSERT(ifp->if_broot != NULL);
2965 ASSERT(ifp->if_broot_bytes <=
2966 (XFS_IFORK_SIZE(ip, whichfork) +
2967 XFS_BROOT_SIZE_ADJ));
2968 xfs_bmbt_to_bmdr(ifp->if_broot, ifp->if_broot_bytes,
2969 (xfs_bmdr_block_t *)cp,
2970 XFS_DFORK_SIZE(dip, mp, whichfork));
2974 case XFS_DINODE_FMT_DEV:
2975 if (iip->ili_format.ilf_fields & XFS_ILOG_DEV) {
2976 ASSERT(whichfork == XFS_DATA_FORK);
2977 dip->di_u.di_dev = cpu_to_be32(ip->i_df.if_u2.if_rdev);
2981 case XFS_DINODE_FMT_UUID:
2982 if (iip->ili_format.ilf_fields & XFS_ILOG_UUID) {
2983 ASSERT(whichfork == XFS_DATA_FORK);
2984 memcpy(&dip->di_u.di_muuid, &ip->i_df.if_u2.if_uuid,
2998 * xfs_iflush() will write a modified inode's changes out to the
2999 * inode's on disk home. The caller must have the inode lock held
3000 * in at least shared mode and the inode flush semaphore must be
3001 * held as well. The inode lock will still be held upon return from
3002 * the call and the caller is free to unlock it.
3003 * The inode flush lock will be unlocked when the inode reaches the disk.
3004 * The flags indicate how the inode's buffer should be written out.
3011 xfs_inode_log_item_t *iip;
3018 int clcount; /* count of inodes clustered */
3020 struct hlist_node *entry;
3021 enum { INT_DELWRI = (1 << 0), INT_ASYNC = (1 << 1) };
3022 int noblock = (flags == XFS_IFLUSH_ASYNC_NOBLOCK);
3024 XFS_STATS_INC(xs_iflush_count);
3026 ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE|MR_ACCESS));
3027 ASSERT(issemalocked(&(ip->i_flock)));
3028 ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE ||
3029 ip->i_d.di_nextents > ip->i_df.if_ext_max);
3035 * If the inode isn't dirty, then just release the inode
3036 * flush lock and do nothing.
3038 if ((ip->i_update_core == 0) &&
3039 ((iip == NULL) || !(iip->ili_format.ilf_fields & XFS_ILOG_ALL))) {
3040 ASSERT((iip != NULL) ?
3041 !(iip->ili_item.li_flags & XFS_LI_IN_AIL) : 1);
3047 * We can't flush the inode until it is unpinned, so wait for it if we
3048 * are allowed to block. We know no one new can pin it, because we are
3049 * holding the inode lock shared and you need to hold it exclusively to
3052 * If we are not allowed to block, force the log out asynchronously so
3053 * that when we come back the inode will be unpinned. If other inodes
3054 * in the same cluster are dirty, they will probably write the inode
3055 * out for us if they occur after the log force completes.
3057 if (noblock && xfs_ipincount(ip)) {
3058 xfs_iunpin_nowait(ip);
3062 xfs_iunpin_wait(ip);
3065 * This may have been unpinned because the filesystem is shutting
3066 * down forcibly. If that's the case we must not write this inode
3067 * to disk, because the log record didn't make it to disk!
3069 if (XFS_FORCED_SHUTDOWN(mp)) {
3070 ip->i_update_core = 0;
3072 iip->ili_format.ilf_fields = 0;
3074 return XFS_ERROR(EIO);
3078 * Decide how buffer will be flushed out. This is done before
3079 * the call to xfs_iflush_int because this field is zeroed by it.
3081 if (iip != NULL && iip->ili_format.ilf_fields != 0) {
3083 * Flush out the inode buffer according to the directions
3084 * of the caller. In the cases where the caller has given
3085 * us a choice, choose the non-delwri case. This is because
3086 * the inode is in the AIL and we need to get it out soon.
3089 case XFS_IFLUSH_SYNC:
3090 case XFS_IFLUSH_DELWRI_ELSE_SYNC:
3093 case XFS_IFLUSH_ASYNC_NOBLOCK:
3094 case XFS_IFLUSH_ASYNC:
3095 case XFS_IFLUSH_DELWRI_ELSE_ASYNC:
3098 case XFS_IFLUSH_DELWRI:
3108 case XFS_IFLUSH_DELWRI_ELSE_SYNC:
3109 case XFS_IFLUSH_DELWRI_ELSE_ASYNC:
3110 case XFS_IFLUSH_DELWRI:
3113 case XFS_IFLUSH_ASYNC_NOBLOCK:
3114 case XFS_IFLUSH_ASYNC:
3117 case XFS_IFLUSH_SYNC:
3128 * Get the buffer containing the on-disk inode.
3130 error = xfs_itobp(mp, NULL, ip, &dip, &bp, 0, 0,
3131 noblock ? XFS_BUF_TRYLOCK : XFS_BUF_LOCK);
3138 * First flush out the inode that xfs_iflush was called with.
3140 error = xfs_iflush_int(ip, bp);
3146 * If the buffer is pinned then push on the log now so we won't
3147 * get stuck waiting in the write for too long.
3149 if (XFS_BUF_ISPINNED(bp))
3150 xfs_log_force(mp, (xfs_lsn_t)0, XFS_LOG_FORCE);
3154 * see if other inodes can be gathered into this write
3156 spin_lock(&ip->i_cluster->icl_lock);
3157 ip->i_cluster->icl_buf = bp;
3160 hlist_for_each_entry(iq, entry, &ip->i_cluster->icl_inodes, i_cnode) {
3165 * Do an un-protected check to see if the inode is dirty and
3166 * is a candidate for flushing. These checks will be repeated
3167 * later after the appropriate locks are acquired.
3170 if ((iq->i_update_core == 0) &&
3172 !(iip->ili_format.ilf_fields & XFS_ILOG_ALL)) &&
3173 xfs_ipincount(iq) == 0) {
3178 * Try to get locks. If any are unavailable,
3179 * then this inode cannot be flushed and is skipped.
3182 /* get inode locks (just i_lock) */
3183 if (xfs_ilock_nowait(iq, XFS_ILOCK_SHARED)) {
3184 /* get inode flush lock */
3185 if (xfs_iflock_nowait(iq)) {
3186 /* check if pinned */
3187 if (xfs_ipincount(iq) == 0) {
3188 /* Arriving here means that
3189 * this inode can be flushed.
3190 * First re-check that it's
3194 if ((iq->i_update_core != 0)||
3196 (iip->ili_format.ilf_fields & XFS_ILOG_ALL))) {
3198 error = xfs_iflush_int(iq, bp);
3202 goto cluster_corrupt_out;
3211 xfs_iunlock(iq, XFS_ILOCK_SHARED);
3214 spin_unlock(&ip->i_cluster->icl_lock);
3217 XFS_STATS_INC(xs_icluster_flushcnt);
3218 XFS_STATS_ADD(xs_icluster_flushinode, clcount);
3221 if (flags & INT_DELWRI) {
3222 xfs_bdwrite(mp, bp);
3223 } else if (flags & INT_ASYNC) {
3224 xfs_bawrite(mp, bp);
3226 error = xfs_bwrite(mp, bp);
3232 xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
3233 xfs_iflush_abort(ip);
3235 * Unlocks the flush lock
3237 return XFS_ERROR(EFSCORRUPTED);
3239 cluster_corrupt_out:
3240 /* Corruption detected in the clustering loop. Invalidate the
3241 * inode buffer and shut down the filesystem.
3243 spin_unlock(&ip->i_cluster->icl_lock);
3246 * Clean up the buffer. If it was B_DELWRI, just release it --
3247 * brelse can handle it with no problems. If not, shut down the
3248 * filesystem before releasing the buffer.
3250 if ((bufwasdelwri = XFS_BUF_ISDELAYWRITE(bp))) {
3254 xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
3258 * Just like incore_relse: if we have b_iodone functions,
3259 * mark the buffer as an error and call them. Otherwise
3260 * mark it as stale and brelse.
3262 if (XFS_BUF_IODONE_FUNC(bp)) {
3263 XFS_BUF_CLR_BDSTRAT_FUNC(bp);
3267 XFS_BUF_ERROR(bp,EIO);
3275 xfs_iflush_abort(iq);
3277 * Unlocks the flush lock
3279 return XFS_ERROR(EFSCORRUPTED);
3288 xfs_inode_log_item_t *iip;
3291 #ifdef XFS_TRANS_DEBUG
3295 ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE|MR_ACCESS));
3296 ASSERT(issemalocked(&(ip->i_flock)));
3297 ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE ||
3298 ip->i_d.di_nextents > ip->i_df.if_ext_max);
3305 * If the inode isn't dirty, then just release the inode
3306 * flush lock and do nothing.
3308 if ((ip->i_update_core == 0) &&
3309 ((iip == NULL) || !(iip->ili_format.ilf_fields & XFS_ILOG_ALL))) {
3314 /* set *dip = inode's place in the buffer */
3315 dip = (xfs_dinode_t *)xfs_buf_offset(bp, ip->i_boffset);
3318 * Clear i_update_core before copying out the data.
3319 * This is for coordination with our timestamp updates
3320 * that don't hold the inode lock. They will always
3321 * update the timestamps BEFORE setting i_update_core,
3322 * so if we clear i_update_core after they set it we
3323 * are guaranteed to see their updates to the timestamps.
3324 * I believe that this depends on strongly ordered memory
3325 * semantics, but we have that. We use the SYNCHRONIZE
3326 * macro to make sure that the compiler does not reorder
3327 * the i_update_core access below the data copy below.
3329 ip->i_update_core = 0;
3333 * Make sure to get the latest atime from the Linux inode.
3335 xfs_synchronize_atime(ip);
3337 if (XFS_TEST_ERROR(be16_to_cpu(dip->di_core.di_magic) != XFS_DINODE_MAGIC,
3338 mp, XFS_ERRTAG_IFLUSH_1, XFS_RANDOM_IFLUSH_1)) {
3339 xfs_cmn_err(XFS_PTAG_IFLUSH, CE_ALERT, mp,
3340 "xfs_iflush: Bad inode %Lu magic number 0x%x, ptr 0x%p",
3341 ip->i_ino, be16_to_cpu(dip->di_core.di_magic), dip);
3344 if (XFS_TEST_ERROR(ip->i_d.di_magic != XFS_DINODE_MAGIC,
3345 mp, XFS_ERRTAG_IFLUSH_2, XFS_RANDOM_IFLUSH_2)) {
3346 xfs_cmn_err(XFS_PTAG_IFLUSH, CE_ALERT, mp,
3347 "xfs_iflush: Bad inode %Lu, ptr 0x%p, magic number 0x%x",
3348 ip->i_ino, ip, ip->i_d.di_magic);
3351 if ((ip->i_d.di_mode & S_IFMT) == S_IFREG) {
3353 (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS) &&
3354 (ip->i_d.di_format != XFS_DINODE_FMT_BTREE),
3355 mp, XFS_ERRTAG_IFLUSH_3, XFS_RANDOM_IFLUSH_3)) {
3356 xfs_cmn_err(XFS_PTAG_IFLUSH, CE_ALERT, mp,
3357 "xfs_iflush: Bad regular inode %Lu, ptr 0x%p",
3361 } else if ((ip->i_d.di_mode & S_IFMT) == S_IFDIR) {
3363 (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS) &&
3364 (ip->i_d.di_format != XFS_DINODE_FMT_BTREE) &&
3365 (ip->i_d.di_format != XFS_DINODE_FMT_LOCAL),
3366 mp, XFS_ERRTAG_IFLUSH_4, XFS_RANDOM_IFLUSH_4)) {
3367 xfs_cmn_err(XFS_PTAG_IFLUSH, CE_ALERT, mp,
3368 "xfs_iflush: Bad directory inode %Lu, ptr 0x%p",
3373 if (XFS_TEST_ERROR(ip->i_d.di_nextents + ip->i_d.di_anextents >
3374 ip->i_d.di_nblocks, mp, XFS_ERRTAG_IFLUSH_5,
3375 XFS_RANDOM_IFLUSH_5)) {
3376 xfs_cmn_err(XFS_PTAG_IFLUSH, CE_ALERT, mp,
3377 "xfs_iflush: detected corrupt incore inode %Lu, total extents = %d, nblocks = %Ld, ptr 0x%p",
3379 ip->i_d.di_nextents + ip->i_d.di_anextents,
3384 if (XFS_TEST_ERROR(ip->i_d.di_forkoff > mp->m_sb.sb_inodesize,
3385 mp, XFS_ERRTAG_IFLUSH_6, XFS_RANDOM_IFLUSH_6)) {
3386 xfs_cmn_err(XFS_PTAG_IFLUSH, CE_ALERT, mp,
3387 "xfs_iflush: bad inode %Lu, forkoff 0x%x, ptr 0x%p",
3388 ip->i_ino, ip->i_d.di_forkoff, ip);
3392 * Bump the flush iteration count, used to detect flushes which
3393 * postdate a log record during recovery.
3396 ip->i_d.di_flushiter++;
3399 * Copy the dirty parts of the inode into the on-disk
3400 * inode. We always copy out the core of the inode,
3401 * because if the inode is dirty at all the core must
3404 xfs_dinode_to_disk(&dip->di_core, &ip->i_d);
3406 /* Wrap, we never let the log put out DI_MAX_FLUSH */
3407 if (ip->i_d.di_flushiter == DI_MAX_FLUSH)
3408 ip->i_d.di_flushiter = 0;
3411 * If this is really an old format inode and the superblock version
3412 * has not been updated to support only new format inodes, then
3413 * convert back to the old inode format. If the superblock version
3414 * has been updated, then make the conversion permanent.
3416 ASSERT(ip->i_d.di_version == XFS_DINODE_VERSION_1 ||
3417 xfs_sb_version_hasnlink(&mp->m_sb));
3418 if (ip->i_d.di_version == XFS_DINODE_VERSION_1) {
3419 if (!xfs_sb_version_hasnlink(&mp->m_sb)) {
3423 ASSERT(ip->i_d.di_nlink <= XFS_MAXLINK_1);
3424 dip->di_core.di_onlink = cpu_to_be16(ip->i_d.di_nlink);
3427 * The superblock version has already been bumped,
3428 * so just make the conversion to the new inode
3431 ip->i_d.di_version = XFS_DINODE_VERSION_2;
3432 dip->di_core.di_version = XFS_DINODE_VERSION_2;
3433 ip->i_d.di_onlink = 0;
3434 dip->di_core.di_onlink = 0;
3435 memset(&(ip->i_d.di_pad[0]), 0, sizeof(ip->i_d.di_pad));
3436 memset(&(dip->di_core.di_pad[0]), 0,
3437 sizeof(dip->di_core.di_pad));
3438 ASSERT(ip->i_d.di_projid == 0);
3442 if (xfs_iflush_fork(ip, dip, iip, XFS_DATA_FORK, bp) == EFSCORRUPTED) {
3446 if (XFS_IFORK_Q(ip)) {
3448 * The only error from xfs_iflush_fork is on the data fork.
3450 (void) xfs_iflush_fork(ip, dip, iip, XFS_ATTR_FORK, bp);
3452 xfs_inobp_check(mp, bp);
3455 * We've recorded everything logged in the inode, so we'd
3456 * like to clear the ilf_fields bits so we don't log and
3457 * flush things unnecessarily. However, we can't stop
3458 * logging all this information until the data we've copied
3459 * into the disk buffer is written to disk. If we did we might
3460 * overwrite the copy of the inode in the log with all the
3461 * data after re-logging only part of it, and in the face of
3462 * a crash we wouldn't have all the data we need to recover.
3464 * What we do is move the bits to the ili_last_fields field.
3465 * When logging the inode, these bits are moved back to the
3466 * ilf_fields field. In the xfs_iflush_done() routine we
3467 * clear ili_last_fields, since we know that the information
3468 * those bits represent is permanently on disk. As long as
3469 * the flush completes before the inode is logged again, then
3470 * both ilf_fields and ili_last_fields will be cleared.
3472 * We can play with the ilf_fields bits here, because the inode
3473 * lock must be held exclusively in order to set bits there
3474 * and the flush lock protects the ili_last_fields bits.
3475 * Set ili_logged so the flush done
3476 * routine can tell whether or not to look in the AIL.
3477 * Also, store the current LSN of the inode so that we can tell
3478 * whether the item has moved in the AIL from xfs_iflush_done().
3479 * In order to read the lsn we need the AIL lock, because
3480 * it is a 64 bit value that cannot be read atomically.
3482 if (iip != NULL && iip->ili_format.ilf_fields != 0) {
3483 iip->ili_last_fields = iip->ili_format.ilf_fields;
3484 iip->ili_format.ilf_fields = 0;
3485 iip->ili_logged = 1;
3487 ASSERT(sizeof(xfs_lsn_t) == 8); /* don't lock if it shrinks */
3488 spin_lock(&mp->m_ail_lock);
3489 iip->ili_flush_lsn = iip->ili_item.li_lsn;
3490 spin_unlock(&mp->m_ail_lock);
3493 * Attach the function xfs_iflush_done to the inode's
3494 * buffer. This will remove the inode from the AIL
3495 * and unlock the inode's flush lock when the inode is
3496 * completely written to disk.
3498 xfs_buf_attach_iodone(bp, (void(*)(xfs_buf_t*,xfs_log_item_t*))
3499 xfs_iflush_done, (xfs_log_item_t *)iip);
3501 ASSERT(XFS_BUF_FSPRIVATE(bp, void *) != NULL);
3502 ASSERT(XFS_BUF_IODONE_FUNC(bp) != NULL);
3505 * We're flushing an inode which is not in the AIL and has
3506 * not been logged but has i_update_core set. For this
3507 * case we can use a B_DELWRI flush and immediately drop
3508 * the inode flush lock because we can avoid the whole
3509 * AIL state thing. It's OK to drop the flush lock now,
3510 * because we've already locked the buffer and to do anything
3511 * you really need both.
3514 ASSERT(iip->ili_logged == 0);
3515 ASSERT(iip->ili_last_fields == 0);
3516 ASSERT((iip->ili_item.li_flags & XFS_LI_IN_AIL) == 0);
3524 return XFS_ERROR(EFSCORRUPTED);
3529 * Flush all inactive inodes in mp.
3539 XFS_MOUNT_ILOCK(mp);
3545 /* Make sure we skip markers inserted by sync */
3546 if (ip->i_mount == NULL) {
3551 vp = XFS_ITOV_NULL(ip);
3553 XFS_MOUNT_IUNLOCK(mp);
3554 xfs_finish_reclaim(ip, 0, XFS_IFLUSH_ASYNC);
3558 ASSERT(vn_count(vp) == 0);
3561 } while (ip != mp->m_inodes);
3563 XFS_MOUNT_IUNLOCK(mp);
3566 #ifdef XFS_ILOCK_TRACE
3567 ktrace_t *xfs_ilock_trace_buf;
3570 xfs_ilock_trace(xfs_inode_t *ip, int lock, unsigned int lockflags, inst_t *ra)
3572 ktrace_enter(ip->i_lock_trace,
3574 (void *)(unsigned long)lock, /* 1 = LOCK, 3=UNLOCK, etc */
3575 (void *)(unsigned long)lockflags, /* XFS_ILOCK_EXCL etc */
3576 (void *)ra, /* caller of ilock */
3577 (void *)(unsigned long)current_cpu(),
3578 (void *)(unsigned long)current_pid(),
3579 NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL);
3584 * Return a pointer to the extent record at file index idx.
3586 xfs_bmbt_rec_host_t *
3588 xfs_ifork_t *ifp, /* inode fork pointer */
3589 xfs_extnum_t idx) /* index of target extent */
3592 if ((ifp->if_flags & XFS_IFEXTIREC) && (idx == 0)) {
3593 return ifp->if_u1.if_ext_irec->er_extbuf;
3594 } else if (ifp->if_flags & XFS_IFEXTIREC) {
3595 xfs_ext_irec_t *erp; /* irec pointer */
3596 int erp_idx = 0; /* irec index */
3597 xfs_extnum_t page_idx = idx; /* ext index in target list */
3599 erp = xfs_iext_idx_to_irec(ifp, &page_idx, &erp_idx, 0);
3600 return &erp->er_extbuf[page_idx];
3601 } else if (ifp->if_bytes) {
3602 return &ifp->if_u1.if_extents[idx];
3609 * Insert new item(s) into the extent records for incore inode
3610 * fork 'ifp'. 'count' new items are inserted at index 'idx'.
3614 xfs_ifork_t *ifp, /* inode fork pointer */
3615 xfs_extnum_t idx, /* starting index of new items */
3616 xfs_extnum_t count, /* number of inserted items */
3617 xfs_bmbt_irec_t *new) /* items to insert */
3619 xfs_extnum_t i; /* extent record index */
3621 ASSERT(ifp->if_flags & XFS_IFEXTENTS);
3622 xfs_iext_add(ifp, idx, count);
3623 for (i = idx; i < idx + count; i++, new++)
3624 xfs_bmbt_set_all(xfs_iext_get_ext(ifp, i), new);
3628 * This is called when the amount of space required for incore file
3629 * extents needs to be increased. The ext_diff parameter stores the
3630 * number of new extents being added and the idx parameter contains
3631 * the extent index where the new extents will be added. If the new
3632 * extents are being appended, then we just need to (re)allocate and
3633 * initialize the space. Otherwise, if the new extents are being
3634 * inserted into the middle of the existing entries, a bit more work
3635 * is required to make room for the new extents to be inserted. The
3636 * caller is responsible for filling in the new extent entries upon
3641 xfs_ifork_t *ifp, /* inode fork pointer */
3642 xfs_extnum_t idx, /* index to begin adding exts */
3643 int ext_diff) /* number of extents to add */
3645 int byte_diff; /* new bytes being added */
3646 int new_size; /* size of extents after adding */
3647 xfs_extnum_t nextents; /* number of extents in file */
3649 nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
3650 ASSERT((idx >= 0) && (idx <= nextents));
3651 byte_diff = ext_diff * sizeof(xfs_bmbt_rec_t);
3652 new_size = ifp->if_bytes + byte_diff;
3654 * If the new number of extents (nextents + ext_diff)
3655 * fits inside the inode, then continue to use the inline
3658 if (nextents + ext_diff <= XFS_INLINE_EXTS) {
3659 if (idx < nextents) {
3660 memmove(&ifp->if_u2.if_inline_ext[idx + ext_diff],
3661 &ifp->if_u2.if_inline_ext[idx],
3662 (nextents - idx) * sizeof(xfs_bmbt_rec_t));
3663 memset(&ifp->if_u2.if_inline_ext[idx], 0, byte_diff);
3665 ifp->if_u1.if_extents = ifp->if_u2.if_inline_ext;
3666 ifp->if_real_bytes = 0;
3667 ifp->if_lastex = nextents + ext_diff;
3670 * Otherwise use a linear (direct) extent list.
3671 * If the extents are currently inside the inode,
3672 * xfs_iext_realloc_direct will switch us from
3673 * inline to direct extent allocation mode.
3675 else if (nextents + ext_diff <= XFS_LINEAR_EXTS) {
3676 xfs_iext_realloc_direct(ifp, new_size);
3677 if (idx < nextents) {
3678 memmove(&ifp->if_u1.if_extents[idx + ext_diff],
3679 &ifp->if_u1.if_extents[idx],
3680 (nextents - idx) * sizeof(xfs_bmbt_rec_t));
3681 memset(&ifp->if_u1.if_extents[idx], 0, byte_diff);
3684 /* Indirection array */
3686 xfs_ext_irec_t *erp;
3690 ASSERT(nextents + ext_diff > XFS_LINEAR_EXTS);
3691 if (ifp->if_flags & XFS_IFEXTIREC) {
3692 erp = xfs_iext_idx_to_irec(ifp, &page_idx, &erp_idx, 1);
3694 xfs_iext_irec_init(ifp);
3695 ASSERT(ifp->if_flags & XFS_IFEXTIREC);
3696 erp = ifp->if_u1.if_ext_irec;
3698 /* Extents fit in target extent page */
3699 if (erp && erp->er_extcount + ext_diff <= XFS_LINEAR_EXTS) {
3700 if (page_idx < erp->er_extcount) {
3701 memmove(&erp->er_extbuf[page_idx + ext_diff],
3702 &erp->er_extbuf[page_idx],
3703 (erp->er_extcount - page_idx) *
3704 sizeof(xfs_bmbt_rec_t));
3705 memset(&erp->er_extbuf[page_idx], 0, byte_diff);
3707 erp->er_extcount += ext_diff;
3708 xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, ext_diff);
3710 /* Insert a new extent page */
3712 xfs_iext_add_indirect_multi(ifp,
3713 erp_idx, page_idx, ext_diff);
3716 * If extent(s) are being appended to the last page in
3717 * the indirection array and the new extent(s) don't fit
3718 * in the page, then erp is NULL and erp_idx is set to
3719 * the next index needed in the indirection array.
3722 int count = ext_diff;
3725 erp = xfs_iext_irec_new(ifp, erp_idx);
3726 erp->er_extcount = count;
3727 count -= MIN(count, (int)XFS_LINEAR_EXTS);
3734 ifp->if_bytes = new_size;
3738 * This is called when incore extents are being added to the indirection
3739 * array and the new extents do not fit in the target extent list. The
3740 * erp_idx parameter contains the irec index for the target extent list
3741 * in the indirection array, and the idx parameter contains the extent
3742 * index within the list. The number of extents being added is stored
3743 * in the count parameter.
3745 * |-------| |-------|
3746 * | | | | idx - number of extents before idx
3748 * | | | | count - number of extents being inserted at idx
3749 * |-------| |-------|
3750 * | count | | nex2 | nex2 - number of extents after idx + count
3751 * |-------| |-------|
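*
* For example (counts are illustrative): inserting count = 3 extents at
* idx = 10 into a target list holding 14 extents leaves the first 10 in
* place, adds the 3 new ones, and moves the remaining nex2 = 4 extents,
* which may spill into a newly allocated extent page.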
3754 xfs_iext_add_indirect_multi(
3755 xfs_ifork_t *ifp, /* inode fork pointer */
3756 int erp_idx, /* target extent irec index */
3757 xfs_extnum_t idx, /* index within target list */
3758 int count) /* new extents being added */
3760 int byte_diff; /* new bytes being added */
3761 xfs_ext_irec_t *erp; /* pointer to irec entry */
3762 xfs_extnum_t ext_diff; /* number of extents to add */
3763 xfs_extnum_t ext_cnt; /* new extents still needed */
3764 xfs_extnum_t nex2; /* extents after idx + count */
3765 xfs_bmbt_rec_t *nex2_ep = NULL; /* temp list for nex2 extents */
3766 int nlists; /* number of irec's (lists) */
3768 ASSERT(ifp->if_flags & XFS_IFEXTIREC);
3769 erp = &ifp->if_u1.if_ext_irec[erp_idx];
3770 nex2 = erp->er_extcount - idx;
3771 nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
3774 * Save second part of target extent list
3775 * (all extents past idx + count). */
3777 byte_diff = nex2 * sizeof(xfs_bmbt_rec_t);
3778 nex2_ep = (xfs_bmbt_rec_t *) kmem_alloc(byte_diff, KM_SLEEP);
3779 memmove(nex2_ep, &erp->er_extbuf[idx], byte_diff);
3780 erp->er_extcount -= nex2;
3781 xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, -nex2);
3782 memset(&erp->er_extbuf[idx], 0, byte_diff);
3786 * Add the new extents to the end of the target
3787 * list, then allocate new irec record(s) and
3788 * extent buffer(s) as needed to store the rest
3789 * of the new extents.
3792 ext_diff = MIN(ext_cnt, (int)XFS_LINEAR_EXTS - erp->er_extcount);
3794 erp->er_extcount += ext_diff;
3795 xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, ext_diff);
3796 ext_cnt -= ext_diff;
3800 erp = xfs_iext_irec_new(ifp, erp_idx);
3801 ext_diff = MIN(ext_cnt, (int)XFS_LINEAR_EXTS);
3802 erp->er_extcount = ext_diff;
3803 xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, ext_diff);
3804 ext_cnt -= ext_diff;
3807 /* Add nex2 extents back to indirection array */
3809 xfs_extnum_t ext_avail;
3812 byte_diff = nex2 * sizeof(xfs_bmbt_rec_t);
3813 ext_avail = XFS_LINEAR_EXTS - erp->er_extcount;
3816 * If nex2 extents fit in the current page, append
3817 * nex2_ep after the new extents.
3819 if (nex2 <= ext_avail) {
3820 i = erp->er_extcount;
3823 * Otherwise, check if space is available in the
3826 else if ((erp_idx < nlists - 1) &&
3827 (nex2 <= (ext_avail = XFS_LINEAR_EXTS -
3828 ifp->if_u1.if_ext_irec[erp_idx+1].er_extcount))) {
3831 /* Create a hole for nex2 extents */
3832 memmove(&erp->er_extbuf[nex2], erp->er_extbuf,
3833 erp->er_extcount * sizeof(xfs_bmbt_rec_t));
3836 * Final choice, create a new extent page for
3841 erp = xfs_iext_irec_new(ifp, erp_idx);
3843 memmove(&erp->er_extbuf[i], nex2_ep, byte_diff);
3844 kmem_free(nex2_ep, byte_diff);
3845 erp->er_extcount += nex2;
3846 xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, nex2);
3851 * This is called when the amount of space required for incore file
3852 * extents needs to be decreased. The ext_diff parameter stores the
3853 * number of extents to be removed and the idx parameter contains
3854 * the extent index where the extents will be removed from.
3856 * If the amount of space needed has decreased below the linear
3857 * limit, XFS_IEXT_BUFSZ, then switch to using the contiguous
3858 * extent array. Otherwise, use kmem_realloc() to adjust the
3859 * size to what is needed.
3863 xfs_ifork_t *ifp, /* inode fork pointer */
3864 xfs_extnum_t idx, /* index to begin removing exts */
3865 int ext_diff) /* number of extents to remove */
3867 xfs_extnum_t nextents; /* number of extents in file */
3868 int new_size; /* size of extents after removal */
3870 ASSERT(ext_diff > 0);
3871 nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
3872 new_size = (nextents - ext_diff) * sizeof(xfs_bmbt_rec_t);
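/*
 * Dispatch on how the extents are currently stored: empty the fork
 * entirely, or remove from the indirection array, the direct
 * (malloc'd) list, or the inline buffer.
 */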
3874 if (new_size == 0) {
3875 xfs_iext_destroy(ifp);
3876 } else if (ifp->if_flags & XFS_IFEXTIREC) {
3877 xfs_iext_remove_indirect(ifp, idx, ext_diff);
3878 } else if (ifp->if_real_bytes) {
3879 xfs_iext_remove_direct(ifp, idx, ext_diff);
3881 xfs_iext_remove_inline(ifp, idx, ext_diff);
3883 ifp->if_bytes = new_size;
3887 * This removes ext_diff extents from the inline buffer, beginning
3888 * at extent index idx.
3891 xfs_iext_remove_inline(
3892 xfs_ifork_t *ifp, /* inode fork pointer */
3893 xfs_extnum_t idx, /* index to begin removing exts */
3894 int ext_diff) /* number of extents to remove */
3896 int nextents; /* number of extents in file */
3898 ASSERT(!(ifp->if_flags & XFS_IFEXTIREC));
3899 ASSERT(idx < XFS_INLINE_EXTS);
3900 nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
3901 ASSERT(((nextents - ext_diff) > 0) &&
3902 (nextents - ext_diff) < XFS_INLINE_EXTS);
3904 if (idx + ext_diff < nextents) {
3905 memmove(&ifp->if_u2.if_inline_ext[idx],
3906 &ifp->if_u2.if_inline_ext[idx + ext_diff],
3907 (nextents - (idx + ext_diff)) *
3908 sizeof(xfs_bmbt_rec_t));
3909 memset(&ifp->if_u2.if_inline_ext[nextents - ext_diff],
3910 0, ext_diff * sizeof(xfs_bmbt_rec_t));
3912 memset(&ifp->if_u2.if_inline_ext[idx], 0,
3913 ext_diff * sizeof(xfs_bmbt_rec_t));
3918 * This removes ext_diff extents from a linear (direct) extent list,
3919 * beginning at extent index idx. If the extents are being removed
3920 * from the end of the list (i.e. truncate) then we just need to re-
3921 * allocate the list to remove the extra space. Otherwise, if the
3922 * extents are being removed from the middle of the existing extent
3923 * entries, then we first need to move the extent records beginning
3924 * at idx + ext_diff up in the list to overwrite the records being
3925 * removed, then remove the extra space via kmem_realloc.
3928 xfs_iext_remove_direct(
3929 xfs_ifork_t *ifp, /* inode fork pointer */
3930 xfs_extnum_t idx, /* index to begin removing exts */
3931 int ext_diff) /* number of extents to remove */
3933 xfs_extnum_t nextents; /* number of extents in file */
3934 int new_size; /* size of extents after removal */
3936 ASSERT(!(ifp->if_flags & XFS_IFEXTIREC));
3937 new_size = ifp->if_bytes -
3938 (ext_diff * sizeof(xfs_bmbt_rec_t));
3939 nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
3941 if (new_size == 0) {
3942 xfs_iext_destroy(ifp);
3945 /* Move extents up in the list (if needed) */
3946 if (idx + ext_diff < nextents) {
3947 memmove(&ifp->if_u1.if_extents[idx],
3948 &ifp->if_u1.if_extents[idx + ext_diff],
3949 (nextents - (idx + ext_diff)) *
3950 sizeof(xfs_bmbt_rec_t));
3952 memset(&ifp->if_u1.if_extents[nextents - ext_diff],
3953 0, ext_diff * sizeof(xfs_bmbt_rec_t));
3955 * Reallocate the direct extent list. If the extents
3956 * will fit inside the inode then xfs_iext_realloc_direct
3957 * will switch from direct to inline extent allocation
3960 xfs_iext_realloc_direct(ifp, new_size);
3961 ifp->if_bytes = new_size;
3965 * This is called when incore extents are being removed from the
3966 * indirection array and the extents being removed span multiple extent
3967 * buffers. The idx parameter contains the file extent index where we
3968 * want to begin removing extents, and the count parameter contains
3969 * how many extents need to be removed.
3971 * |-------| |-------|
3972 * | nex1 | | | nex1 - number of extents before idx
3973 * |-------| | count |
3974 * | | | | count - number of extents being removed at idx
3975 * | count | |-------|
3976 * | | | nex2 | nex2 - number of extents after idx + count
3977 * |-------| |-------|
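*
* For example: the nex1 extents ahead of idx stay where they are, the
* count extents starting at idx are dropped (entire extent pages are
* freed when everything in them goes away), and the nex2 trailing
* extents are shifted down to close the gap.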
3980 xfs_iext_remove_indirect(
3981 xfs_ifork_t *ifp, /* inode fork pointer */
3982 xfs_extnum_t idx, /* index to begin removing extents */
3983 int count) /* number of extents to remove */
3985 xfs_ext_irec_t *erp; /* indirection array pointer */
3986 int erp_idx = 0; /* indirection array index */
3987 xfs_extnum_t ext_cnt; /* extents left to remove */
3988 xfs_extnum_t ext_diff; /* extents to remove in current list */
3989 xfs_extnum_t nex1; /* number of extents before idx */
3990 xfs_extnum_t nex2; /* extents after idx + count */
3991 int nlists; /* entries in indirection array */
3992 int page_idx = idx; /* index in target extent list */
3994 ASSERT(ifp->if_flags & XFS_IFEXTIREC);
3995 erp = xfs_iext_idx_to_irec(ifp, &page_idx, &erp_idx, 0);
3996 ASSERT(erp != NULL);
3997 nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
4001 nex2 = MAX((erp->er_extcount - (nex1 + ext_cnt)), 0);
4002 ext_diff = MIN(ext_cnt, (erp->er_extcount - nex1));
4004 * Check for deletion of entire list;
4005 * xfs_iext_irec_remove() updates extent offsets.
4007 if (ext_diff == erp->er_extcount) {
4008 xfs_iext_irec_remove(ifp, erp_idx);
4009 ext_cnt -= ext_diff;
4012 ASSERT(erp_idx < ifp->if_real_bytes /
4014 erp = &ifp->if_u1.if_ext_irec[erp_idx];
4021 /* Move extents up (if needed) */
4023 memmove(&erp->er_extbuf[nex1],
4024 &erp->er_extbuf[nex1 + ext_diff],
4025 nex2 * sizeof(xfs_bmbt_rec_t));
4027 /* Zero out rest of page */
4028 memset(&erp->er_extbuf[nex1 + nex2], 0, (XFS_IEXT_BUFSZ -
4029 ((nex1 + nex2) * sizeof(xfs_bmbt_rec_t))));
4030 /* Update remaining counters */
4031 erp->er_extcount -= ext_diff;
4032 xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, -ext_diff);
4033 ext_cnt -= ext_diff;
4038 ifp->if_bytes -= count * sizeof(xfs_bmbt_rec_t);
4039 xfs_iext_irec_compact(ifp);
4043 * Create, destroy, or resize a linear (direct) block of extents.
4046 xfs_iext_realloc_direct(
4047 xfs_ifork_t *ifp, /* inode fork pointer */
4048 int new_size) /* new size of extents */
4050 int rnew_size; /* real new size of extents */
4052 rnew_size = new_size;
4054 ASSERT(!(ifp->if_flags & XFS_IFEXTIREC) ||
4055 ((new_size >= 0) && (new_size <= XFS_IEXT_BUFSZ) &&
4056 (new_size != ifp->if_real_bytes)));
4058 /* Free extent records */
4059 if (new_size == 0) {
4060 xfs_iext_destroy(ifp);
4062 /* Resize direct extent list and zero any new bytes */
4063 else if (ifp->if_real_bytes) {
4064 /* Check if extents will fit inside the inode */
4065 if (new_size <= XFS_INLINE_EXTS * sizeof(xfs_bmbt_rec_t)) {
4066 xfs_iext_direct_to_inline(ifp, new_size /
4067 (uint)sizeof(xfs_bmbt_rec_t));
4068 ifp->if_bytes = new_size;
4071 if (!is_power_of_2(new_size)){
4072 rnew_size = roundup_pow_of_two(new_size);
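/* e.g. a new_size of 3000 bytes is rounded up to a 4096-byte
 * (power-of-two) allocation */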
4074 if (rnew_size != ifp->if_real_bytes) {
4075 ifp->if_u1.if_extents =
4076 kmem_realloc(ifp->if_u1.if_extents,
4081 if (rnew_size > ifp->if_real_bytes) {
4082 memset(&ifp->if_u1.if_extents[ifp->if_bytes /
4083 (uint)sizeof(xfs_bmbt_rec_t)], 0,
4084 rnew_size - ifp->if_real_bytes);
4088 * Switch from the inline extent buffer to a direct
4089 * extent list. Be sure to include the inline extent
4090 * bytes in new_size.
4093 new_size += ifp->if_bytes;
4094 if (!is_power_of_2(new_size)) {
4095 rnew_size = roundup_pow_of_two(new_size);
4097 xfs_iext_inline_to_direct(ifp, rnew_size);
4099 ifp->if_real_bytes = rnew_size;
4100 ifp->if_bytes = new_size;
4104 * Switch from linear (direct) extent records to inline buffer.
4107 xfs_iext_direct_to_inline(
4108 xfs_ifork_t *ifp, /* inode fork pointer */
4109 xfs_extnum_t nextents) /* number of extents in file */
4111 ASSERT(ifp->if_flags & XFS_IFEXTENTS);
4112 ASSERT(nextents <= XFS_INLINE_EXTS);
4114 * The inline buffer was zeroed when we switched
4115 * from inline to direct extent allocation mode,
4116 * so we don't need to clear it here.
4118 memcpy(ifp->if_u2.if_inline_ext, ifp->if_u1.if_extents,
4119 nextents * sizeof(xfs_bmbt_rec_t));
4120 kmem_free(ifp->if_u1.if_extents, ifp->if_real_bytes);
4121 ifp->if_u1.if_extents = ifp->if_u2.if_inline_ext;
4122 ifp->if_real_bytes = 0;
4126 * Switch from inline buffer to linear (direct) extent records.
4127 * new_size should already be rounded up to the next power of 2
4128 * by the caller (when appropriate), so use new_size as it is.
4129 * However, since new_size may be rounded up, we can't update
4130 * if_bytes here. It is the caller's responsibility to update
4131 * if_bytes upon return.
4134 xfs_iext_inline_to_direct(
4135 xfs_ifork_t *ifp, /* inode fork pointer */
4136 int new_size) /* number of extents in file */
4138 ifp->if_u1.if_extents = kmem_alloc(new_size, KM_SLEEP);
4139 memset(ifp->if_u1.if_extents, 0, new_size);
4140 if (ifp->if_bytes) {
4141 memcpy(ifp->if_u1.if_extents, ifp->if_u2.if_inline_ext,
4143 memset(ifp->if_u2.if_inline_ext, 0, XFS_INLINE_EXTS *
4144 sizeof(xfs_bmbt_rec_t));
4146 ifp->if_real_bytes = new_size;
4150 * Resize an extent indirection array to new_size bytes.
4153 xfs_iext_realloc_indirect(
4154 xfs_ifork_t *ifp, /* inode fork pointer */
4155 int new_size) /* new indirection array size */
4157 int nlists; /* number of irec's (ex lists) */
4158 int size; /* current indirection array size */
4160 ASSERT(ifp->if_flags & XFS_IFEXTIREC);
4161 nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
4162 size = nlists * sizeof(xfs_ext_irec_t);
4163 ASSERT(ifp->if_real_bytes);
4164 ASSERT((new_size >= 0) && (new_size != size));
4165 if (new_size == 0) {
4166 xfs_iext_destroy(ifp);
4168 ifp->if_u1.if_ext_irec = (xfs_ext_irec_t *)
4169 kmem_realloc(ifp->if_u1.if_ext_irec,
4170 new_size, size, KM_SLEEP);
4175 * Switch from indirection array to linear (direct) extent allocations.
4178 xfs_iext_indirect_to_direct(
4179 xfs_ifork_t *ifp) /* inode fork pointer */
4181 xfs_bmbt_rec_host_t *ep; /* extent record pointer */
4182 xfs_extnum_t nextents; /* number of extents in file */
4183 int size; /* size of file extents */
4185 ASSERT(ifp->if_flags & XFS_IFEXTIREC);
4186 nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
4187 ASSERT(nextents <= XFS_LINEAR_EXTS);
4188 size = nextents * sizeof(xfs_bmbt_rec_t);
4190 xfs_iext_irec_compact_full(ifp);
4191 ASSERT(ifp->if_real_bytes == XFS_IEXT_BUFSZ);
4193 ep = ifp->if_u1.if_ext_irec->er_extbuf;
4194 kmem_free(ifp->if_u1.if_ext_irec, sizeof(xfs_ext_irec_t));
4195 ifp->if_flags &= ~XFS_IFEXTIREC;
4196 ifp->if_u1.if_extents = ep;
4197 ifp->if_bytes = size;
4198 if (nextents < XFS_LINEAR_EXTS) {
4199 xfs_iext_realloc_direct(ifp, size);
4204 * Free incore file extents.
4208 xfs_ifork_t *ifp) /* inode fork pointer */
4210 if (ifp->if_flags & XFS_IFEXTIREC) {
4214 nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
4215 for (erp_idx = nlists - 1; erp_idx >= 0 ; erp_idx--) {
4216 xfs_iext_irec_remove(ifp, erp_idx);
4218 ifp->if_flags &= ~XFS_IFEXTIREC;
4219 } else if (ifp->if_real_bytes) {
4220 kmem_free(ifp->if_u1.if_extents, ifp->if_real_bytes);
4221 } else if (ifp->if_bytes) {
4222 memset(ifp->if_u2.if_inline_ext, 0, XFS_INLINE_EXTS *
4223 sizeof(xfs_bmbt_rec_t));
4225 ifp->if_u1.if_extents = NULL;
4226 ifp->if_real_bytes = 0;

/*
 * Return a pointer to the extent record for file system block bno.
 */
xfs_bmbt_rec_host_t *			/* pointer to found extent record */
xfs_iext_bno_to_ext(
	xfs_ifork_t	*ifp,		/* inode fork pointer */
	xfs_fileoff_t	bno,		/* block number to search for */
	xfs_extnum_t	*idxp)		/* index of target extent */
{
	xfs_bmbt_rec_host_t *base;	/* pointer to first extent */
	xfs_filblks_t	blockcount = 0;	/* number of blocks in extent */
	xfs_bmbt_rec_host_t *ep = NULL;	/* pointer to target extent */
	xfs_ext_irec_t	*erp = NULL;	/* indirection array pointer */
	int		high;		/* upper boundary in search */
	xfs_extnum_t	idx = 0;	/* index of target extent */
	int		low;		/* lower boundary in search */
	xfs_extnum_t	nextents;	/* number of file extents */
	xfs_fileoff_t	startoff = 0;	/* start offset of extent */

	nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
	if (nextents == 0) {
		*idxp = 0;
		return NULL;
	}
	low = 0;
	if (ifp->if_flags & XFS_IFEXTIREC) {
		/* Find target extent list */
		int	erp_idx = 0;
		erp = xfs_iext_bno_to_irec(ifp, bno, &erp_idx);
		base = erp->er_extbuf;
		high = erp->er_extcount - 1;
	} else {
		base = ifp->if_u1.if_extents;
		high = nextents - 1;
	}
	/* Binary search extent records */
	while (low <= high) {
		idx = (low + high) >> 1;
		ep = base + idx;
		startoff = xfs_bmbt_get_startoff(ep);
		blockcount = xfs_bmbt_get_blockcount(ep);
		if (bno < startoff) {
			high = idx - 1;
		} else if (bno >= startoff + blockcount) {
			low = idx + 1;
		} else {
			/* Convert back to file-based extent index */
			if (ifp->if_flags & XFS_IFEXTIREC) {
				idx += erp->er_extoff;
			}
			*idxp = idx;
			return ep;
		}
	}
	/* Convert back to file-based extent index */
	if (ifp->if_flags & XFS_IFEXTIREC) {
		idx += erp->er_extoff;
	}
	if (bno >= startoff + blockcount) {
		if (++idx == nextents) {
			ep = NULL;
		} else {
			ep = xfs_iext_get_ext(ifp, idx);
		}
	}
	*idxp = idx;
	return ep;
}
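
/*
 * Usage sketch (an assumed caller pattern, not lifted from this file): look
 * up the extent covering file block bno. A NULL return means bno lies beyond
 * the last mapped extent; otherwise, if the returned record begins past bno,
 * the block falls in a hole in front of that extent:
 *
 *	xfs_extnum_t		idx;
 *	xfs_bmbt_rec_host_t	*ep;
 *
 *	ep = xfs_iext_bno_to_ext(ifp, bno, &idx);
 *	if (ep != NULL && xfs_bmbt_get_startoff(ep) <= bno)
 *		found = 1;	(hypothetical flag: bno is mapped by extent idx)
 */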

/*
 * Return a pointer to the indirection array entry containing the
 * extent record for filesystem block bno. Store the index of the
 * target irec in *erp_idxp.
 */
xfs_ext_irec_t *			/* pointer to found extent record */
xfs_iext_bno_to_irec(
	xfs_ifork_t	*ifp,		/* inode fork pointer */
	xfs_fileoff_t	bno,		/* block number to search for */
	int		*erp_idxp)	/* irec index of target ext list */
{
	xfs_ext_irec_t	*erp = NULL;	/* indirection array pointer */
	xfs_ext_irec_t	*erp_next;	/* next indirection array entry */
	int		erp_idx;	/* indirection array index */
	int		nlists;		/* number of extent irec's (lists) */
	int		high;		/* binary search upper limit */
	int		low;		/* binary search lower limit */

	ASSERT(ifp->if_flags & XFS_IFEXTIREC);
	nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
	erp_idx = 0;
	low = 0;
	high = nlists - 1;
	while (low <= high) {
		erp_idx = (low + high) >> 1;
		erp = &ifp->if_u1.if_ext_irec[erp_idx];
		erp_next = erp_idx < nlists - 1 ? erp + 1 : NULL;
		if (bno < xfs_bmbt_get_startoff(erp->er_extbuf)) {
			high = erp_idx - 1;
		} else if (erp_next && bno >=
			   xfs_bmbt_get_startoff(erp_next->er_extbuf)) {
			low = erp_idx + 1;
		} else {
			break;
		}
	}
	*erp_idxp = erp_idx;
	return erp;
}

/*
 * Return a pointer to the indirection array entry containing the
 * extent record at file extent index *idxp. Store the index of the
 * target irec in *erp_idxp and store the page index of the target
 * extent record in *idxp.
 */
xfs_ext_irec_t *			/* pointer to target irec entry */
xfs_iext_idx_to_irec(
	xfs_ifork_t	*ifp,		/* inode fork pointer */
	xfs_extnum_t	*idxp,		/* extent index (file -> page) */
	int		*erp_idxp,	/* pointer to target irec */
	int		realloc)	/* new bytes were just added */
{
	xfs_ext_irec_t	*prev;		/* pointer to previous irec */
	xfs_ext_irec_t	*erp = NULL;	/* pointer to current irec */
	int		erp_idx;	/* indirection array index */
	int		nlists;		/* number of irec's (ex lists) */
	int		high;		/* binary search upper limit */
	int		low;		/* binary search lower limit */
	xfs_extnum_t	page_idx = *idxp; /* extent index in target list */

	ASSERT(ifp->if_flags & XFS_IFEXTIREC);
	ASSERT(page_idx >= 0 && page_idx <=
		ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t));
	nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
	erp_idx = 0;
	low = 0;
	high = nlists - 1;

	/* Binary search extent irec's */
	while (low <= high) {
		erp_idx = (low + high) >> 1;
		erp = &ifp->if_u1.if_ext_irec[erp_idx];
		prev = erp_idx > 0 ? erp - 1 : NULL;
		if (page_idx < erp->er_extoff || (page_idx == erp->er_extoff &&
		     realloc && prev && prev->er_extcount < XFS_LINEAR_EXTS)) {
			high = erp_idx - 1;
		} else if (page_idx > erp->er_extoff + erp->er_extcount ||
			   (page_idx == erp->er_extoff + erp->er_extcount &&
			    !realloc)) {
			low = erp_idx + 1;
		} else if (page_idx == erp->er_extoff + erp->er_extcount &&
			   erp->er_extcount == XFS_LINEAR_EXTS) {
			ASSERT(realloc);
			page_idx = 0;
			erp_idx++;
			erp = erp_idx < nlists ? erp + 1 : NULL;
			break;
		} else {
			page_idx -= erp->er_extoff;
			break;
		}
	}
	*idxp = page_idx;
	*erp_idxp = erp_idx;
	return erp;
}
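
/*
 * Worked example (illustrative sketch only): the file-wide extent index in
 * *idxp is resolved to an offset within a single extent page by subtracting
 * that page's er_extoff. If the first list holds N extents and a second,
 * non-empty list follows, index N (with realloc == 0) resolves to the start
 * of the second list:
 *
 *	xfs_extnum_t	page_idx = N;	(N is hypothetical here)
 *	int		erp_idx;
 *	xfs_ext_irec_t	*erp;
 *
 *	erp = xfs_iext_idx_to_irec(ifp, &page_idx, &erp_idx, 0);
 *	ASSERT(erp_idx == 1 && page_idx == 0);
 */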

/*
 * Allocate and initialize an indirection array once the space needed
 * for incore extents increases above XFS_IEXT_BUFSZ.
 */
void
xfs_iext_irec_init(
	xfs_ifork_t	*ifp)		/* inode fork pointer */
{
	xfs_ext_irec_t	*erp;		/* indirection array pointer */
	xfs_extnum_t	nextents;	/* number of extents in file */

	ASSERT(!(ifp->if_flags & XFS_IFEXTIREC));
	nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
	ASSERT(nextents <= XFS_LINEAR_EXTS);

	erp = (xfs_ext_irec_t *)
		kmem_alloc(sizeof(xfs_ext_irec_t), KM_SLEEP);

	if (nextents == 0) {
		ifp->if_u1.if_extents = kmem_alloc(XFS_IEXT_BUFSZ, KM_SLEEP);
	} else if (!ifp->if_real_bytes) {
		xfs_iext_inline_to_direct(ifp, XFS_IEXT_BUFSZ);
	} else if (ifp->if_real_bytes < XFS_IEXT_BUFSZ) {
		xfs_iext_realloc_direct(ifp, XFS_IEXT_BUFSZ);
	}
	erp->er_extbuf = ifp->if_u1.if_extents;
	erp->er_extcount = nextents;
	erp->er_extoff = 0;

	ifp->if_flags |= XFS_IFEXTIREC;
	ifp->if_real_bytes = XFS_IEXT_BUFSZ;
	ifp->if_bytes = nextents * sizeof(xfs_bmbt_rec_t);
	ifp->if_u1.if_ext_irec = erp;
}
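
/*
 * Resulting layout, for illustration (a sketch, not a normative statement of
 * the incore format): after xfs_iext_irec_init() the fork owns a one-entry
 * indirection array whose single page is the old direct buffer, grown to
 * XFS_IEXT_BUFSZ bytes:
 *
 *	xfs_ext_irec_t	*erp = ifp->if_u1.if_ext_irec;
 *
 *	ASSERT(ifp->if_flags & XFS_IFEXTIREC);
 *	ASSERT(ifp->if_real_bytes == XFS_IEXT_BUFSZ);
 *	ASSERT(erp[0].er_extoff == 0);
 *	ASSERT(erp[0].er_extcount * sizeof(xfs_bmbt_rec_t) == ifp->if_bytes);
 */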

/*
 * Allocate and initialize a new entry in the indirection array.
 */
xfs_ext_irec_t *			/* pointer to new irec entry */
xfs_iext_irec_new(
	xfs_ifork_t	*ifp,		/* inode fork pointer */
	int		erp_idx)	/* index for new irec */
{
	xfs_ext_irec_t	*erp;		/* indirection array pointer */
	int		i;		/* loop counter */
	int		nlists;		/* number of irec's (ex lists) */

	ASSERT(ifp->if_flags & XFS_IFEXTIREC);
	nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;

	/* Resize indirection array */
	xfs_iext_realloc_indirect(ifp, ++nlists *
				  sizeof(xfs_ext_irec_t));
	/*
	 * Move records down in the array so the
	 * new page can use erp_idx.
	 */
	erp = ifp->if_u1.if_ext_irec;
	for (i = nlists - 1; i > erp_idx; i--) {
		memmove(&erp[i], &erp[i-1], sizeof(xfs_ext_irec_t));
	}
	ASSERT(i == erp_idx);

	/* Initialize new extent record */
	erp = ifp->if_u1.if_ext_irec;
	erp[erp_idx].er_extbuf = kmem_alloc(XFS_IEXT_BUFSZ, KM_SLEEP);
	ifp->if_real_bytes = nlists * XFS_IEXT_BUFSZ;
	memset(erp[erp_idx].er_extbuf, 0, XFS_IEXT_BUFSZ);
	erp[erp_idx].er_extcount = 0;
	erp[erp_idx].er_extoff = erp_idx > 0 ?
		erp[erp_idx-1].er_extoff + erp[erp_idx-1].er_extcount : 0;
	return &erp[erp_idx];
}
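
/*
 * Illustration (sketch): inserting a page keeps er_extoff cumulative. With
 * two existing lists of 10 and 20 extents, xfs_iext_irec_new(ifp, 1) leaves
 * er_extoff values of 0, 10 and 10 for lists 0, 1 (the new, empty page) and
 * 2; the caller is expected to fill the new page and then fix up the
 * following offsets, e.g. via xfs_iext_irec_update_extoffs():
 *
 *	erp = xfs_iext_irec_new(ifp, 1);
 *	ASSERT(erp->er_extcount == 0 && erp->er_extoff == 10);
 */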

/*
 * Remove a record from the indirection array.
 */
void
xfs_iext_irec_remove(
	xfs_ifork_t	*ifp,		/* inode fork pointer */
	int		erp_idx)	/* irec index to remove */
{
	xfs_ext_irec_t	*erp;		/* indirection array pointer */
	int		i;		/* loop counter */
	int		nlists;		/* number of irec's (ex lists) */

	ASSERT(ifp->if_flags & XFS_IFEXTIREC);
	nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
	erp = &ifp->if_u1.if_ext_irec[erp_idx];
	if (erp->er_extbuf) {
		xfs_iext_irec_update_extoffs(ifp, erp_idx + 1,
			-erp->er_extcount);
		kmem_free(erp->er_extbuf, XFS_IEXT_BUFSZ);
	}
	/* Compact extent records */
	erp = ifp->if_u1.if_ext_irec;
	for (i = erp_idx; i < nlists - 1; i++) {
		memmove(&erp[i], &erp[i+1], sizeof(xfs_ext_irec_t));
	}
	/*
	 * Manually free the last extent record from the indirection
	 * array. A call to xfs_iext_realloc_indirect() with a size
	 * of zero would result in a call to xfs_iext_destroy() which
	 * would in turn call this function again, creating a nasty
	 * infinite loop.
	 */
	if (--nlists) {
		xfs_iext_realloc_indirect(ifp,
			nlists * sizeof(xfs_ext_irec_t));
	} else {
		kmem_free(ifp->if_u1.if_ext_irec,
			sizeof(xfs_ext_irec_t));
	}
	ifp->if_real_bytes = nlists * XFS_IEXT_BUFSZ;
}

/*
 * This is called to clean up large amounts of unused memory allocated
 * by the indirection array. Before compacting anything though, verify
 * that the indirection array is still needed and switch back to the
 * linear extent list (or even the inline buffer) if possible. The
 * compaction policy is as follows:
 *
 *    Full Compaction: Extents fit into a single page (or inline buffer)
 *    Full Compaction: Extents occupy less than 10% of allocated space
 * Partial Compaction: Extents occupy > 10% and < 50% of allocated space
 *      No Compaction: Extents occupy at least 50% of allocated space
 */
void
xfs_iext_irec_compact(
	xfs_ifork_t	*ifp)		/* inode fork pointer */
{
	xfs_extnum_t	nextents;	/* number of extents in file */
	int		nlists;		/* number of irec's (ex lists) */

	ASSERT(ifp->if_flags & XFS_IFEXTIREC);
	nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
	nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);

	if (nextents == 0) {
		xfs_iext_destroy(ifp);
	} else if (nextents <= XFS_INLINE_EXTS) {
		xfs_iext_indirect_to_direct(ifp);
		xfs_iext_direct_to_inline(ifp, nextents);
	} else if (nextents <= XFS_LINEAR_EXTS) {
		xfs_iext_indirect_to_direct(ifp);
	} else if (nextents < (nlists * XFS_LINEAR_EXTS) >> 3) {
		xfs_iext_irec_compact_full(ifp);
	} else if (nextents < (nlists * XFS_LINEAR_EXTS) >> 1) {
		xfs_iext_irec_compact_pages(ifp);
	}
}
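
/*
 * Worked example of the policy above (numbers assumed for illustration,
 * taking XFS_LINEAR_EXTS == 256 records per page): a 16-page array can hold
 * 16 * 256 = 4096 extents, so
 *
 *	nextents <=  256		back to a direct (or inline) list
 *	nextents <   512  (4096 >> 3)	full compaction
 *	nextents <  2048  (4096 >> 1)	page-level compaction
 *	nextents >= 2048		no compaction
 *
 * Note that ">> 3" is one eighth (12.5%) of capacity, slightly more generous
 * than the "10%" quoted above.
 */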

/*
 * Combine extents from neighboring extent pages.
 */
void
xfs_iext_irec_compact_pages(
	xfs_ifork_t	*ifp)		/* inode fork pointer */
{
	xfs_ext_irec_t	*erp, *erp_next;/* pointers to irec entries */
	int		erp_idx = 0;	/* indirection array index */
	int		nlists;		/* number of irec's (ex lists) */

	ASSERT(ifp->if_flags & XFS_IFEXTIREC);
	nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
	while (erp_idx < nlists - 1) {
		erp = &ifp->if_u1.if_ext_irec[erp_idx];
		erp_next = erp + 1;
		if (erp_next->er_extcount <=
		    (XFS_LINEAR_EXTS - erp->er_extcount)) {
			memmove(&erp->er_extbuf[erp->er_extcount],
				erp_next->er_extbuf, erp_next->er_extcount *
				sizeof(xfs_bmbt_rec_t));
			erp->er_extcount += erp_next->er_extcount;
			/*
			 * Free page before removing extent record
			 * so er_extoffs don't get modified in
			 * xfs_iext_irec_remove.
			 */
			kmem_free(erp_next->er_extbuf, XFS_IEXT_BUFSZ);
			erp_next->er_extbuf = NULL;
			xfs_iext_irec_remove(ifp, erp_idx + 1);
			nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
		} else {
			erp_idx++;
		}
	}
}
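
/*
 * Example (sketch, assuming XFS_LINEAR_EXTS == 256): adjacent pages holding
 * 100 and 150 extents are merged, because the combined 250 records still fit
 * in one page, and the emptied page is then removed; neighbours holding 200
 * and 100 extents are left alone, since 300 records would overflow a single
 * page.
 */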

/*
 * Fully compact the extent records managed by the indirection array.
 */
void
xfs_iext_irec_compact_full(
	xfs_ifork_t	*ifp)			/* inode fork pointer */
{
	xfs_bmbt_rec_host_t *ep, *ep_next;	/* extent record pointers */
	xfs_ext_irec_t	*erp, *erp_next;	/* extent irec pointers */
	int		erp_idx = 0;		/* extent irec index */
	int		ext_avail;		/* empty entries in ex list */
	int		ext_diff;		/* number of exts to add */
	int		nlists;			/* number of irec's (ex lists) */

	ASSERT(ifp->if_flags & XFS_IFEXTIREC);
	nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
	erp = ifp->if_u1.if_ext_irec;
	ep = &erp->er_extbuf[erp->er_extcount];
	erp_next = erp + 1;
	ep_next = erp_next->er_extbuf;
	while (erp_idx < nlists - 1) {
		ext_avail = XFS_LINEAR_EXTS - erp->er_extcount;
		ext_diff = MIN(ext_avail, erp_next->er_extcount);
		memcpy(ep, ep_next, ext_diff * sizeof(xfs_bmbt_rec_t));
		erp->er_extcount += ext_diff;
		erp_next->er_extcount -= ext_diff;
		/* Remove next page */
		if (erp_next->er_extcount == 0) {
			/*
			 * Free page before removing extent record
			 * so er_extoffs don't get modified in
			 * xfs_iext_irec_remove. The buffer was
			 * allocated with XFS_IEXT_BUFSZ bytes, so
			 * free it with that size rather than the
			 * (now zero) record count.
			 */
			kmem_free(erp_next->er_extbuf, XFS_IEXT_BUFSZ);
			erp_next->er_extbuf = NULL;
			xfs_iext_irec_remove(ifp, erp_idx + 1);
			erp = &ifp->if_u1.if_ext_irec[erp_idx];
			nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
		/* Update next page */
		} else {
			/* Move rest of page up to become next new page */
			memmove(erp_next->er_extbuf, ep_next,
				erp_next->er_extcount * sizeof(xfs_bmbt_rec_t));
			ep_next = erp_next->er_extbuf;
			memset(&ep_next[erp_next->er_extcount], 0,
				(XFS_LINEAR_EXTS - erp_next->er_extcount) *
				sizeof(xfs_bmbt_rec_t));
		}
		if (erp->er_extcount == XFS_LINEAR_EXTS) {
			erp_idx++;
			if (erp_idx < nlists)
				erp = &ifp->if_u1.if_ext_irec[erp_idx];
			else
				break;
		}
		ep = &erp->er_extbuf[erp->er_extcount];
		erp_next = erp + 1;
		ep_next = erp_next->er_extbuf;
	}
}

/*
 * This is called to update the er_extoff field in the indirection
 * array when extents have been added or removed from one of the
 * extent lists. erp_idx contains the irec index to begin updating
 * at and ext_diff contains the number of extents that were added
 * or removed.
 */
void
xfs_iext_irec_update_extoffs(
	xfs_ifork_t	*ifp,		/* inode fork pointer */
	int		erp_idx,	/* irec index to update */
	int		ext_diff)	/* number of new extents */
{
	int		i;		/* loop counter */
	int		nlists;		/* number of irec's (ex lists) */

	ASSERT(ifp->if_flags & XFS_IFEXTIREC);
	nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
	for (i = erp_idx; i < nlists; i++) {
		ifp->if_u1.if_ext_irec[i].er_extoff += ext_diff;
	}
}
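
/*
 * Example (sketch): after adding 3 extents to list 1 of a 4-list array, only
 * the offsets of the lists that follow it need adjusting:
 *
 *	xfs_iext_irec_update_extoffs(ifp, 2, 3);
 *
 * bumps er_extoff of lists 2 and 3 by 3, while lists 0 and 1 keep their
 * original offsets.
 */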