/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_types.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir2.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_ialloc.h"
#include "xfs_quota.h"
#include "xfs_utils.h"
#include "xfs_trans_priv.h"
#include "xfs_inode_item.h"
#include "xfs_bmap.h"
#include "xfs_btree_trace.h"
#include "xfs_dir2_trace.h"
/*
 * Allocate and initialise an xfs_inode.
 */
STATIC struct xfs_inode *
xfs_inode_alloc(
	struct xfs_mount	*mp,
	xfs_ino_t		ino)
{
	struct xfs_inode	*ip;

	/*
	 * If this didn't occur in transactions, we could use
	 * KM_MAYFAIL and return NULL here on ENOMEM. Set the
	 * code up to do this anyway.
	 */
	ip = kmem_zone_alloc(xfs_inode_zone, KM_SLEEP);
	if (!ip)
		return NULL;
	if (inode_init_always(mp->m_super, VFS_I(ip))) {
		kmem_zone_free(xfs_inode_zone, ip);
		return NULL;
	}

	ASSERT(atomic_read(&ip->i_iocount) == 0);
	ASSERT(atomic_read(&ip->i_pincount) == 0);
	ASSERT(!spin_is_locked(&ip->i_flags_lock));
	ASSERT(completion_done(&ip->i_flush));
	ASSERT(!rwsem_is_locked(&ip->i_iolock.mr_lock));

	mrlock_init(&ip->i_iolock, MRLOCK_BARRIER, "xfsio", ip->i_ino);

	/* initialise the xfs inode */
	ip->i_ino = ino;
	ip->i_mount = mp;
	memset(&ip->i_imap, 0, sizeof(struct xfs_imap));
	ip->i_afp = NULL;
	memset(&ip->i_df, 0, sizeof(xfs_ifork_t));
	ip->i_flags = 0;
	ip->i_update_core = 0;
	ip->i_delayed_blks = 0;
	memset(&ip->i_d, 0, sizeof(xfs_icdinode_t));
	ip->i_size = 0;
	ip->i_new_size = 0;

	/*
	 * Initialize inode's trace buffers.
	 */
#ifdef XFS_INODE_TRACE
	ip->i_trace = ktrace_alloc(INODE_TRACE_SIZE, KM_NOFS);
#endif
#ifdef XFS_BMAP_TRACE
	ip->i_xtrace = ktrace_alloc(XFS_BMAP_KTRACE_SIZE, KM_NOFS);
#endif
#ifdef XFS_BTREE_TRACE
	ip->i_btrace = ktrace_alloc(XFS_BMBT_KTRACE_SIZE, KM_NOFS);
#endif
#ifdef XFS_RW_TRACE
	ip->i_rwtrace = ktrace_alloc(XFS_RW_KTRACE_SIZE, KM_NOFS);
#endif
#ifdef XFS_ILOCK_TRACE
	ip->i_lock_trace = ktrace_alloc(XFS_ILOCK_KTRACE_SIZE, KM_NOFS);
#endif
#ifdef XFS_DIR2_TRACE
	ip->i_dir_trace = ktrace_alloc(XFS_DIR2_KTRACE_SIZE, KM_NOFS);
#endif

	/* prevent anyone from using this yet */
	VFS_I(ip)->i_state = I_NEW|I_LOCK;

	return ip;
}
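/*
 * Usage sketch (illustrative, not a caller in this file's hot path):
 * xfs_inode_alloc() and xfs_inode_free() below are a strict pair.  A
 * caller that allocates an inode but fails before publishing it in the
 * per-AG radix tree must tear it down itself, as the error path of
 * xfs_iget_cache_miss() below does:
 *
 *	ip = xfs_inode_alloc(mp, ino);
 *	if (!ip)
 *		return ENOMEM;
 *	...
 *	__destroy_inode(VFS_I(ip));
 *	xfs_inode_free(ip);
 */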
STATIC void
xfs_inode_free(
	struct xfs_inode	*ip)
{
	switch (ip->i_d.di_mode & S_IFMT) {
	case S_IFREG:
	case S_IFDIR:
	case S_IFLNK:
		xfs_idestroy_fork(ip, XFS_DATA_FORK);
		break;
	}

	if (ip->i_afp)
		xfs_idestroy_fork(ip, XFS_ATTR_FORK);

#ifdef XFS_INODE_TRACE
	ktrace_free(ip->i_trace);
#endif
#ifdef XFS_BMAP_TRACE
	ktrace_free(ip->i_xtrace);
#endif
#ifdef XFS_BTREE_TRACE
	ktrace_free(ip->i_btrace);
#endif
#ifdef XFS_RW_TRACE
	ktrace_free(ip->i_rwtrace);
#endif
#ifdef XFS_ILOCK_TRACE
	ktrace_free(ip->i_lock_trace);
#endif
#ifdef XFS_DIR2_TRACE
	ktrace_free(ip->i_dir_trace);
#endif

	if (ip->i_itemp) {
		/*
		 * Only if we are shutting down the fs will we see an
		 * inode still in the AIL. If it is there, we should remove
		 * it to prevent a use-after-free from occurring.
		 */
		xfs_log_item_t	*lip = &ip->i_itemp->ili_item;
		struct xfs_ail	*ailp = lip->li_ailp;

		ASSERT(((lip->li_flags & XFS_LI_IN_AIL) == 0) ||
				XFS_FORCED_SHUTDOWN(ip->i_mount));
		if (lip->li_flags & XFS_LI_IN_AIL) {
			spin_lock(&ailp->xa_lock);
			if (lip->li_flags & XFS_LI_IN_AIL)
				xfs_trans_ail_delete(ailp, lip);
			else
				spin_unlock(&ailp->xa_lock);
		}
		xfs_inode_item_destroy(ip);
		ip->i_itemp = NULL;
	}

	/* asserts to verify all state is correct here */
	ASSERT(atomic_read(&ip->i_iocount) == 0);
	ASSERT(atomic_read(&ip->i_pincount) == 0);
	ASSERT(!spin_is_locked(&ip->i_flags_lock));
	ASSERT(completion_done(&ip->i_flush));

	kmem_zone_free(xfs_inode_zone, ip);
}
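/*
 * Note on the AIL removal above: li_flags is sampled unlocked first so
 * the common not-in-AIL case takes no AIL lock, then re-checked under
 * xa_lock before xfs_trans_ail_delete() removes the item.  That call
 * drops xa_lock itself, which is why the unlock sits only on the else
 * branch.
 */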
/*
 * Check the validity of the inode we just found in the cache.
 */
static int
xfs_iget_cache_hit(
	struct xfs_perag	*pag,
	struct xfs_inode	*ip,
	int			flags,
	int			lock_flags) __releases(pag->pag_ici_lock)
{
	struct inode		*inode = VFS_I(ip);
	struct xfs_mount	*mp = ip->i_mount;
	int			error;

	spin_lock(&ip->i_flags_lock);

	/*
	 * If we are racing with another cache hit that is currently
	 * instantiating this inode or currently recycling it out of
	 * reclaimable state, wait for the initialisation to complete
	 * before continuing.
	 *
	 * XXX(hch): eventually we should do something equivalent to
	 *	     wait_on_inode to wait for these flags to be cleared
	 *	     instead of polling for it.
	 */
	if (ip->i_flags & (XFS_INEW|XFS_IRECLAIM)) {
		XFS_STATS_INC(xs_ig_frecycle);
		error = EAGAIN;
		goto out_error;
	}

	/*
	 * If lookup is racing with unlink return an error immediately.
	 */
	if (ip->i_d.di_mode == 0 && !(flags & XFS_IGET_CREATE)) {
		error = ENOENT;
		goto out_error;
	}

	/*
	 * If IRECLAIMABLE is set, we've torn down the VFS inode already.
	 * Need to carefully get it back into usable state.
	 */
	if (ip->i_flags & XFS_IRECLAIMABLE) {
		xfs_itrace_exit_tag(ip, "xfs_iget.alloc");

		/*
		 * We need to set XFS_INEW atomically with clearing the
		 * reclaimable tag so that we do have an indicator of the
		 * inode still being initialized.
		 */
		ip->i_flags |= XFS_INEW;
		ip->i_flags &= ~XFS_IRECLAIMABLE;
		__xfs_inode_clear_reclaim_tag(mp, pag, ip);

		spin_unlock(&ip->i_flags_lock);
		read_unlock(&pag->pag_ici_lock);

		error = -inode_init_always(mp->m_super, inode);
		if (error) {
			/*
			 * Re-initializing the inode failed, and we are in deep
			 * trouble.  Try to re-add it to the reclaim list.
			 */
			read_lock(&pag->pag_ici_lock);
			spin_lock(&ip->i_flags_lock);

			ip->i_flags &= ~XFS_INEW;
			ip->i_flags |= XFS_IRECLAIMABLE;
			__xfs_inode_set_reclaim_tag(pag, ip);
			goto out_error;
		}
		inode->i_state = I_LOCK|I_NEW;
	} else {
		/* If the VFS inode is being torn down, pause and try again. */
		if (!igrab(inode)) {
			error = EAGAIN;
			goto out_error;
		}

		/* We've got a live one. */
		spin_unlock(&ip->i_flags_lock);
		read_unlock(&pag->pag_ici_lock);
	}

	if (lock_flags != 0)
		xfs_ilock(ip, lock_flags);

	xfs_iflags_clear(ip, XFS_ISTALE);
	xfs_itrace_exit_tag(ip, "xfs_iget.found");
	XFS_STATS_INC(xs_ig_found);

	return 0;

out_error:
	spin_unlock(&ip->i_flags_lock);
	read_unlock(&pag->pag_ici_lock);
	return error;
}
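/*
 * The EAGAIN returns above (racing with XFS_INEW/XFS_IRECLAIM, or a
 * failed igrab) are not hard errors: xfs_iget() below turns them into
 * a delay(1) followed by a retry of the whole lookup.
 */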
static int
xfs_iget_cache_miss(
	struct xfs_mount	*mp,
	struct xfs_perag	*pag,
	xfs_trans_t		*tp,
	xfs_ino_t		ino,
	struct xfs_inode	**ipp,
	xfs_daddr_t		bno,
	int			flags,
	int			lock_flags) __releases(pag->pag_ici_lock)
{
	struct xfs_inode	*ip;
	int			error;
	unsigned long		first_index, mask;
	xfs_agino_t		agino = XFS_INO_TO_AGINO(mp, ino);

	ip = xfs_inode_alloc(mp, ino);
	if (!ip)
		return ENOMEM;

	error = xfs_iread(mp, tp, ip, bno, flags);
	if (error)
		goto out_destroy;

	xfs_itrace_exit_tag(ip, "xfs_iget.alloc");

	if ((ip->i_d.di_mode == 0) && !(flags & XFS_IGET_CREATE)) {
		error = ENOENT;
		goto out_destroy;
	}

	/*
	 * Preload the radix tree so we can insert safely under the
	 * write spinlock. Note that we cannot sleep inside the preload
	 * region.
	 */
	if (radix_tree_preload(GFP_KERNEL)) {
		error = EAGAIN;
		goto out_destroy;
	}

	/*
	 * Because the inode hasn't been added to the radix-tree yet it can't
	 * be found by another thread, so we can do the non-sleeping lock here.
	 */
	if (lock_flags) {
		if (!xfs_ilock_nowait(ip, lock_flags))
			BUG();
	}

	mask = ~(((XFS_INODE_CLUSTER_SIZE(mp) >> mp->m_sb.sb_inodelog)) - 1);
	first_index = agino & mask;
	write_lock(&pag->pag_ici_lock);

	/* insert the new inode */
	error = radix_tree_insert(&pag->pag_ici_root, agino, ip);
	if (unlikely(error)) {
		WARN_ON(error != -EEXIST);
		XFS_STATS_INC(xs_ig_dup);
		error = EAGAIN;
		goto out_preload_end;
	}

	/* These values _must_ be set before releasing the radix tree lock! */
	ip->i_udquot = ip->i_gdquot = NULL;
	xfs_iflags_set(ip, XFS_INEW);

	write_unlock(&pag->pag_ici_lock);
	radix_tree_preload_end();
	*ipp = ip;
	return 0;

out_preload_end:
	write_unlock(&pag->pag_ici_lock);
	radix_tree_preload_end();
	if (lock_flags)
		xfs_iunlock(ip, lock_flags);
out_destroy:
	__destroy_inode(VFS_I(ip));
	xfs_inode_free(ip);
	return error;
}
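/*
 * Shape of the radix tree insertion above, in outline (illustrative):
 *
 *	radix_tree_preload(GFP_KERNEL);		may sleep, no locks held
 *	write_lock(&pag->pag_ici_lock);
 *	radix_tree_insert(...);			cannot fail with ENOMEM now
 *	write_unlock(&pag->pag_ici_lock);
 *	radix_tree_preload_end();
 *
 * Only -EEXIST is expected from the insert, hence the WARN_ON and the
 * EAGAIN that makes the caller retry against the now-cached inode.
 */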
/*
 * Look up an inode by number in the given file system.
 * The inode is looked up in the cache held in each AG.
 * If the inode is found in the cache, initialise the vfs inode
 * if necessary.
 *
 * If it is not in core, read it in from the file system's device,
 * add it to the cache and initialise the vfs inode.
 *
 * The inode is locked according to the value of the lock_flags parameter.
 * This flag parameter indicates how and if the inode's IO lock and inode lock
 * should be taken.
 *
 * mp -- the mount point structure for the current file system.  It points
 *       to the inode hash table.
 * tp -- a pointer to the current transaction if there is one.  This is
 *       simply passed through to the xfs_iread() call.
 * ino -- the number of the inode desired.  This is the unique identifier
 *        within the file system for the inode being requested.
 * lock_flags -- flags indicating how to lock the inode.  See the comment
 *       for xfs_ilock() for a list of valid values.
 * bno -- the block number starting the buffer containing the inode,
 *       if known (as by bulkstat), else 0.
 */
int
xfs_iget(
	xfs_mount_t	*mp,
	xfs_trans_t	*tp,
	xfs_ino_t	ino,
	uint		flags,
	uint		lock_flags,
	xfs_inode_t	**ipp,
	xfs_daddr_t	bno)
{
	xfs_inode_t	*ip;
	int		error;
	xfs_perag_t	*pag;
	xfs_agino_t	agino;

	/* the radix tree exists only in inode capable AGs */
	if (XFS_INO_TO_AGNO(mp, ino) >= mp->m_maxagi)
		return EINVAL;

	/* get the perag structure and ensure that it's inode capable */
	pag = xfs_get_perag(mp, ino);
	if (!pag->pagi_inodeok)
		return EINVAL;
	ASSERT(pag->pag_ici_init);
	agino = XFS_INO_TO_AGINO(mp, ino);

again:
	error = 0;
	read_lock(&pag->pag_ici_lock);
	ip = radix_tree_lookup(&pag->pag_ici_root, agino);

	if (ip) {
		error = xfs_iget_cache_hit(pag, ip, flags, lock_flags);
		if (error)
			goto out_error_or_again;
	} else {
		read_unlock(&pag->pag_ici_lock);
		XFS_STATS_INC(xs_ig_missed);

		error = xfs_iget_cache_miss(mp, pag, tp, ino, &ip, bno,
							flags, lock_flags);
		if (error)
			goto out_error_or_again;
	}
	xfs_put_perag(mp, pag);

	*ipp = ip;

	ASSERT(ip->i_df.if_ext_max ==
	       XFS_IFORK_DSIZE(ip) / sizeof(xfs_bmbt_rec_t));
	/*
	 * If we have a real type for an on-disk inode, we can set ops(&unlock)
	 * now.  If it's a new inode being created, xfs_ialloc will handle it.
	 */
	if (xfs_iflags_test(ip, XFS_INEW) && ip->i_d.di_mode != 0)
		xfs_setup_inode(ip);
	return 0;

out_error_or_again:
	if (error == EAGAIN) {
		delay(1);
		goto again;
	}
	xfs_put_perag(mp, pag);
	return error;
}
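/*
 * Typical call pattern (illustrative; mirrors callers such as
 * bulkstat, which pass a known inode buffer block number in bno):
 *
 *	error = xfs_iget(mp, NULL, ino, 0, XFS_ILOCK_SHARED, &ip, bno);
 *	if (error)
 *		return error;
 *	... use ip ...
 *	xfs_iput(ip, XFS_ILOCK_SHARED);
 *
 * Note that error codes here are positive (EINVAL, EAGAIN, ENOENT).
 */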
/*
 * Decrement reference count of an inode structure and unlock it.
 *
 * ip -- the inode being released
 * lock_flags -- this parameter indicates the inode's locks to be
 *       released.  See the comment on xfs_iunlock() for a list
 *       of valid values for this parameter.
 */
void
xfs_iput(xfs_inode_t	*ip,
	 uint		lock_flags)
{
	xfs_itrace_entry(ip);
	xfs_iunlock(ip, lock_flags);
	IRELE(ip);
}
/*
 * Special iput for brand-new inodes that are still locked.
 */
void
xfs_iput_new(
	xfs_inode_t	*ip,
	uint		lock_flags)
{
	struct inode	*inode = VFS_I(ip);

	xfs_itrace_entry(ip);

	if (ip->i_d.di_mode == 0) {
		ASSERT(!xfs_iflags_test(ip, XFS_IRECLAIMABLE));
		make_bad_inode(inode);
	}
	if (inode->i_state & I_NEW)
		unlock_new_inode(inode);
	if (lock_flags)
		xfs_iunlock(ip, lock_flags);
	IRELE(ip);
}
/*
 * This is called to free all the memory associated with an inode.
 * It must free the inode itself and any buffers allocated for
 * if_extents/if_data and if_broot.  It must also free the lock
 * associated with the inode.
 *
 * Note: because we don't initialise everything on reallocation out
 * of the zone, we must ensure we nullify everything correctly before
 * freeing the structure.
 */
void
xfs_ireclaim(
	struct xfs_inode	*ip)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_perag	*pag;

	XFS_STATS_INC(xs_ig_reclaims);

	/*
	 * Remove the inode from the per-AG radix tree.  It doesn't matter
	 * if it was never added to it because radix_tree_delete can deal
	 * with that case just fine.
	 */
	pag = xfs_get_perag(mp, ip->i_ino);
	write_lock(&pag->pag_ici_lock);
	radix_tree_delete(&pag->pag_ici_root, XFS_INO_TO_AGINO(mp, ip->i_ino));
	write_unlock(&pag->pag_ici_lock);
	xfs_put_perag(mp, pag);

	/*
	 * Here we do an (almost) spurious inode lock in order to coordinate
	 * with inode cache radix tree lookups.  This is because the lookup
	 * can reference the inodes in the cache without taking references.
	 *
	 * We make that OK here by ensuring that we wait until the inode is
	 * unlocked after the lookup before we go ahead and free it.  We get
	 * both the ilock and the iolock because the code may need to drop the
	 * ilock one but will still hold the iolock.
	 */
	xfs_ilock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
	xfs_qm_dqdetach(ip);
	xfs_iunlock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);

	xfs_inode_free(ip);
}
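/*
 * The near-empty lock/unlock cycle above is the whole synchronisation
 * story: once radix_tree_delete() has run, no new lookup can find this
 * inode, and cycling both locks guarantees that any lookup which had
 * already found it has finished with it before xfs_inode_free()
 * releases the memory.
 */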
/*
 * This is a wrapper routine around the xfs_ilock() routine
 * used to centralize some grungy code.  It is used in places
 * that wish to lock the inode solely for reading the extents.
 * The reason these places can't just call xfs_ilock(SHARED)
 * is that the inode lock also guards the reading in of the
 * extents from disk for a file in b-tree format.  If the inode
 * is in b-tree format, then we need to lock the inode exclusively
 * until the extents are read in.  Locking it exclusively all
 * the time would limit our parallelism unnecessarily, though.
 * What we do instead is check to see if the extents have been
 * read in yet, and only lock the inode exclusively if they
 * have not.
 *
 * The function returns a value which should be given to the
 * corresponding xfs_iunlock_map_shared().  This value is
 * the mode in which the lock was actually taken.
 */
uint
xfs_ilock_map_shared(
	xfs_inode_t	*ip)
{
	uint	lock_mode;

	if ((ip->i_d.di_format == XFS_DINODE_FMT_BTREE) &&
	    ((ip->i_df.if_flags & XFS_IFEXTENTS) == 0)) {
		lock_mode = XFS_ILOCK_EXCL;
	} else {
		lock_mode = XFS_ILOCK_SHARED;
	}

	xfs_ilock(ip, lock_mode);

	return lock_mode;
}
/*
 * This is simply the unlock routine to go with xfs_ilock_map_shared().
 * All it does is call xfs_iunlock() with the given lock_mode.
 */
void
xfs_iunlock_map_shared(
	xfs_inode_t	*ip,
	unsigned int	lock_mode)
{
	xfs_iunlock(ip, lock_mode);
}
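/*
 * Illustrative pairing for the two helpers above; the returned mode
 * must be fed back to the unlock side:
 *
 *	uint lock_mode = xfs_ilock_map_shared(ip);
 *	... read the extent list ...
 *	xfs_iunlock_map_shared(ip, lock_mode);
 */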
/*
 * The xfs inode contains 2 locks: a multi-reader lock called the
 * i_iolock and a multi-reader lock called the i_lock.  This routine
 * allows either or both of the locks to be obtained.
 *
 * The 2 locks should always be ordered so that the IO lock is
 * obtained first in order to prevent deadlock.
 *
 * ip -- the inode being locked
 * lock_flags -- this parameter indicates the inode's locks
 *       to be locked.  It can be:
 *		XFS_IOLOCK_SHARED,
 *		XFS_IOLOCK_EXCL,
 *		XFS_ILOCK_SHARED,
 *		XFS_ILOCK_EXCL,
 *		XFS_IOLOCK_SHARED | XFS_ILOCK_SHARED,
 *		XFS_IOLOCK_SHARED | XFS_ILOCK_EXCL,
 *		XFS_IOLOCK_EXCL | XFS_ILOCK_SHARED,
 *		XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL
 */
void
xfs_ilock(
	xfs_inode_t		*ip,
	uint			lock_flags)
{
	/*
	 * You can't set both SHARED and EXCL for the same lock,
	 * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
	 * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
	 */
	ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
	       (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
	ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
	       (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
	ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_DEP_MASK)) == 0);

	if (lock_flags & XFS_IOLOCK_EXCL)
		mrupdate_nested(&ip->i_iolock, XFS_IOLOCK_DEP(lock_flags));
	else if (lock_flags & XFS_IOLOCK_SHARED)
		mraccess_nested(&ip->i_iolock, XFS_IOLOCK_DEP(lock_flags));

	if (lock_flags & XFS_ILOCK_EXCL)
		mrupdate_nested(&ip->i_lock, XFS_ILOCK_DEP(lock_flags));
	else if (lock_flags & XFS_ILOCK_SHARED)
		mraccess_nested(&ip->i_lock, XFS_ILOCK_DEP(lock_flags));

	xfs_ilock_trace(ip, 1, lock_flags, (inst_t *)__return_address);
}
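/*
 * For example, xfs_ireclaim() above takes and drops both locks in one
 * call; xfs_ilock() itself acquires the IO lock before the inode lock,
 * whatever the order of the flag bits:
 *
 *	xfs_ilock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
 *	...
 *	xfs_iunlock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
 */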
/*
 * This is just like xfs_ilock(), except that the caller
 * is guaranteed not to sleep.  It returns 1 if it gets
 * the requested locks and 0 otherwise.  If the IO lock is
 * obtained but the inode lock cannot be, then the IO lock
 * is dropped before returning.
 *
 * ip -- the inode being locked
 * lock_flags -- this parameter indicates the inode's locks to be
 *       locked.  See the comment for xfs_ilock() for a list
 *       of valid values.
 */
int
xfs_ilock_nowait(
	xfs_inode_t		*ip,
	uint			lock_flags)
{
	/*
	 * You can't set both SHARED and EXCL for the same lock,
	 * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
	 * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
	 */
	ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
	       (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
	ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
	       (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
	ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_DEP_MASK)) == 0);

	if (lock_flags & XFS_IOLOCK_EXCL) {
		if (!mrtryupdate(&ip->i_iolock))
			goto out;
	} else if (lock_flags & XFS_IOLOCK_SHARED) {
		if (!mrtryaccess(&ip->i_iolock))
			goto out;
	}
	if (lock_flags & XFS_ILOCK_EXCL) {
		if (!mrtryupdate(&ip->i_lock))
			goto out_undo_iolock;
	} else if (lock_flags & XFS_ILOCK_SHARED) {
		if (!mrtryaccess(&ip->i_lock))
			goto out_undo_iolock;
	}
	xfs_ilock_trace(ip, 2, lock_flags, (inst_t *)__return_address);
	return 1;

out_undo_iolock:
	if (lock_flags & XFS_IOLOCK_EXCL)
		mrunlock_excl(&ip->i_iolock);
	else if (lock_flags & XFS_IOLOCK_SHARED)
		mrunlock_shared(&ip->i_iolock);
out:
	return 0;
}
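/*
 * The nonblocking variant is for contexts that must not sleep; e.g.
 * xfs_iget_cache_miss() above calls it inside the radix tree preload
 * region, where failure is impossible because the inode is not yet
 * visible to anyone else:
 *
 *	if (!xfs_ilock_nowait(ip, lock_flags))
 *		BUG();
 */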
/*
 * xfs_iunlock() is used to drop the inode locks acquired with
 * xfs_ilock() and xfs_ilock_nowait().  The caller must pass
 * in the flags given to xfs_ilock() or xfs_ilock_nowait() so
 * that we know which locks to drop.
 *
 * ip -- the inode being unlocked
 * lock_flags -- this parameter indicates the inode's locks to be
 *       unlocked.  See the comment for xfs_ilock() for a list
 *       of valid values for this parameter.
 */
void
xfs_iunlock(
	xfs_inode_t		*ip,
	uint			lock_flags)
{
	/*
	 * You can't set both SHARED and EXCL for the same lock,
	 * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
	 * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
	 */
	ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
	       (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
	ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
	       (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
	ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_IUNLOCK_NONOTIFY |
			XFS_LOCK_DEP_MASK)) == 0);
	ASSERT(lock_flags != 0);

	if (lock_flags & XFS_IOLOCK_EXCL)
		mrunlock_excl(&ip->i_iolock);
	else if (lock_flags & XFS_IOLOCK_SHARED)
		mrunlock_shared(&ip->i_iolock);

	if (lock_flags & XFS_ILOCK_EXCL)
		mrunlock_excl(&ip->i_lock);
	else if (lock_flags & XFS_ILOCK_SHARED)
		mrunlock_shared(&ip->i_lock);

	if ((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) &&
	    !(lock_flags & XFS_IUNLOCK_NONOTIFY) && ip->i_itemp) {
		/*
		 * Let the AIL know that this item has been unlocked in case
		 * it is in the AIL and anyone is waiting on it.  Don't do
		 * this if the caller has asked us not to.
		 */
		xfs_trans_unlocked_item(ip->i_itemp->ili_item.li_ailp,
					(xfs_log_item_t*)(ip->i_itemp));
	}
	xfs_ilock_trace(ip, 3, lock_flags, (inst_t *)__return_address);
}
/*
 * Give up write locks.  The i/o lock cannot be held nested
 * if it is being demoted.
 */
void
xfs_ilock_demote(
	xfs_inode_t		*ip,
	uint			lock_flags)
{
	ASSERT(lock_flags & (XFS_IOLOCK_EXCL|XFS_ILOCK_EXCL));
	ASSERT((lock_flags & ~(XFS_IOLOCK_EXCL|XFS_ILOCK_EXCL)) == 0);

	if (lock_flags & XFS_ILOCK_EXCL)
		mrdemote(&ip->i_lock);
	if (lock_flags & XFS_IOLOCK_EXCL)
		mrdemote(&ip->i_iolock);
}
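/*
 * Demotion sketch (illustrative): a holder of XFS_ILOCK_EXCL that has
 * finished modifying the inode can drop to shared mode in place,
 * letting readers in without a full unlock/relock window:
 *
 *	xfs_ilock(ip, XFS_ILOCK_EXCL);
 *	... modify ...
 *	xfs_ilock_demote(ip, XFS_ILOCK_EXCL);
 *	... now held as XFS_ILOCK_SHARED ...
 */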
#ifdef DEBUG
/*
 * Debug-only routine: without additional rw_semaphore APIs, we can
 * now only answer requests regarding whether we hold the lock for write
 * (reader state is outside our visibility, we only track writer state).
 *
 * Note: this means !xfs_isilocked would give false positives, so don't do that.
 */
int
xfs_isilocked(
	xfs_inode_t		*ip,
	uint			lock_flags)
{
	if ((lock_flags & (XFS_ILOCK_EXCL|XFS_ILOCK_SHARED)) ==
			XFS_ILOCK_EXCL) {
		if (!ip->i_lock.mr_writer)
			return 0;
	}

	if ((lock_flags & (XFS_IOLOCK_EXCL|XFS_IOLOCK_SHARED)) ==
			XFS_IOLOCK_EXCL) {
		if (!ip->i_iolock.mr_writer)
			return 0;
	}

	return 1;
}
#endif
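/*
 * xfs_isilocked() is meant for assertions of the form (and, per the
 * note above, only positive assertions):
 *
 *	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
 */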
#ifdef	XFS_INODE_TRACE

#define KTRACE_ENTER(ip, vk, s, line, ra)			\
	ktrace_enter((ip)->i_trace,				\
/* 0 */		(void *)(__psint_t)(vk),			\
/* 1 */		(void *)(s),					\
/* 2 */		(void *)(__psint_t) line,			\
/* 3 */		(void *)(__psint_t)atomic_read(&VFS_I(ip)->i_count), \
/* 4 */		(void *)(ra),					\
/* 5 */		NULL,						\
/* 6 */		(void *)(__psint_t)current_cpu(),		\
/* 7 */		(void *)(__psint_t)current_pid(),		\
/* 8 */		(void *)__return_address,			\
/* 9 */		NULL, NULL, NULL, NULL, NULL, NULL, NULL)
/*
 * Vnode tracing code.
 */
void
_xfs_itrace_entry(xfs_inode_t *ip, const char *func, inst_t *ra)
{
	KTRACE_ENTER(ip, INODE_KTRACE_ENTRY, func, 0, ra);
}

void
_xfs_itrace_exit(xfs_inode_t *ip, const char *func, inst_t *ra)
{
	KTRACE_ENTER(ip, INODE_KTRACE_EXIT, func, 0, ra);
}

void
xfs_itrace_hold(xfs_inode_t *ip, char *file, int line, inst_t *ra)
{
	KTRACE_ENTER(ip, INODE_KTRACE_HOLD, file, line, ra);
}

void
_xfs_itrace_ref(xfs_inode_t *ip, char *file, int line, inst_t *ra)
{
	KTRACE_ENTER(ip, INODE_KTRACE_REF, file, line, ra);
}

void
xfs_itrace_rele(xfs_inode_t *ip, char *file, int line, inst_t *ra)
{
	KTRACE_ENTER(ip, INODE_KTRACE_RELE, file, line, ra);
}
#endif	/* XFS_INODE_TRACE */