 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#include "xfs_types.h"
#include "xfs_trans.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_ialloc.h"
#include "xfs_quota.h"
#include "xfs_utils.h"
#include "xfs_trans_priv.h"
#include "xfs_inode_item.h"
#include "xfs_btree_trace.h"
#include "xfs_dir2_trace.h"
 * Allocate and initialise an xfs_inode.
STATIC struct xfs_inode *
	 * if this didn't occur in transactions, we could use
	 * KM_MAYFAIL and return NULL here on ENOMEM. Set the
	 * code up to do this anyway.
	ip = kmem_zone_alloc(xfs_inode_zone, KM_SLEEP);

	if (inode_init_always(mp->m_super, VFS_I(ip))) {
		kmem_zone_free(xfs_inode_zone, ip);

	ASSERT(atomic_read(&ip->i_iocount) == 0);
	ASSERT(atomic_read(&ip->i_pincount) == 0);
	ASSERT(!spin_is_locked(&ip->i_flags_lock));
	ASSERT(completion_done(&ip->i_flush));
	/* initialise the xfs inode */
	memset(&ip->i_imap, 0, sizeof(struct xfs_imap));
	memset(&ip->i_df, 0, sizeof(xfs_ifork_t));
	ip->i_update_core = 0;
	ip->i_delayed_blks = 0;
	memset(&ip->i_d, 0, sizeof(xfs_icdinode_t));

	 * Initialize inode's trace buffers.
#ifdef XFS_INODE_TRACE
	ip->i_trace = ktrace_alloc(INODE_TRACE_SIZE, KM_NOFS);
	ip->i_xtrace = ktrace_alloc(XFS_BMAP_KTRACE_SIZE, KM_NOFS);
#ifdef XFS_BTREE_TRACE
	ip->i_btrace = ktrace_alloc(XFS_BMBT_KTRACE_SIZE, KM_NOFS);
	ip->i_rwtrace = ktrace_alloc(XFS_RW_KTRACE_SIZE, KM_NOFS);
#ifdef XFS_ILOCK_TRACE
	ip->i_lock_trace = ktrace_alloc(XFS_ILOCK_KTRACE_SIZE, KM_NOFS);
#ifdef XFS_DIR2_TRACE
	ip->i_dir_trace = ktrace_alloc(XFS_DIR2_KTRACE_SIZE, KM_NOFS);

	/* prevent anyone from using this yet */
	VFS_I(ip)->i_state = I_NEW|I_LOCK;
	struct xfs_inode	*ip)

	switch (ip->i_d.di_mode & S_IFMT) {
		xfs_idestroy_fork(ip, XFS_DATA_FORK);

		xfs_idestroy_fork(ip, XFS_ATTR_FORK);

#ifdef XFS_INODE_TRACE
	ktrace_free(ip->i_trace);
#ifdef XFS_BMAP_TRACE
	ktrace_free(ip->i_xtrace);
#ifdef XFS_BTREE_TRACE
	ktrace_free(ip->i_btrace);
	ktrace_free(ip->i_rwtrace);
#ifdef XFS_ILOCK_TRACE
	ktrace_free(ip->i_lock_trace);
#ifdef XFS_DIR2_TRACE
	ktrace_free(ip->i_dir_trace);

		 * Only if we are shutting down the fs will we see an
		 * inode still in the AIL. If it is there, we should remove
		 * it to prevent a use-after-free from occurring.
		xfs_log_item_t	*lip = &ip->i_itemp->ili_item;
		struct xfs_ail	*ailp = lip->li_ailp;

		ASSERT(((lip->li_flags & XFS_LI_IN_AIL) == 0) ||
		       XFS_FORCED_SHUTDOWN(ip->i_mount));
		if (lip->li_flags & XFS_LI_IN_AIL) {
			spin_lock(&ailp->xa_lock);
			if (lip->li_flags & XFS_LI_IN_AIL)
				xfs_trans_ail_delete(ailp, lip);
			else
				spin_unlock(&ailp->xa_lock);

		xfs_inode_item_destroy(ip);
	/* asserts to verify all state is correct here */
	ASSERT(atomic_read(&ip->i_iocount) == 0);
	ASSERT(atomic_read(&ip->i_pincount) == 0);
	ASSERT(!spin_is_locked(&ip->i_flags_lock));
	ASSERT(completion_done(&ip->i_flush));

	kmem_zone_free(xfs_inode_zone, ip);
 * Check the validity of the inode we just found in the cache
	struct xfs_perag	*pag,
	struct xfs_inode	*ip,
	int			lock_flags) __releases(pag->pag_ici_lock)

	struct inode		*inode = VFS_I(ip);
	struct xfs_mount	*mp = ip->i_mount;

	spin_lock(&ip->i_flags_lock);
	 * If we are racing with another cache hit that is currently
	 * instantiating this inode or currently recycling it out of
	 * reclaimable state, wait for the initialisation to complete.
	 *
	 * XXX(hch): eventually we should do something equivalent to
	 * wait_on_inode to wait for these flags to be cleared
	 * instead of polling for it.
	if (ip->i_flags & (XFS_INEW|XFS_IRECLAIM)) {
		XFS_STATS_INC(xs_ig_frecycle);
	 * If lookup is racing with unlink, return an error immediately.
	if (ip->i_d.di_mode == 0 && !(flags & XFS_IGET_CREATE)) {

	 * If IRECLAIMABLE is set, we've torn down the VFS inode already.
	 * Need to carefully get it back into usable state.
	if (ip->i_flags & XFS_IRECLAIMABLE) {
		xfs_itrace_exit_tag(ip, "xfs_iget.alloc");

		 * We need to set XFS_INEW atomically with clearing the
		 * reclaimable tag so that we do have an indicator of the
		 * inode still being initialized.
		ip->i_flags |= XFS_INEW;
		ip->i_flags &= ~XFS_IRECLAIMABLE;
		__xfs_inode_clear_reclaim_tag(mp, pag, ip);

		spin_unlock(&ip->i_flags_lock);
		read_unlock(&pag->pag_ici_lock);

		error = -inode_init_always(mp->m_super, inode);

			 * Re-initializing the inode failed, and we are in deep
			 * trouble.  Try to re-add it to the reclaim list.
			read_lock(&pag->pag_ici_lock);
			spin_lock(&ip->i_flags_lock);

			ip->i_flags &= ~XFS_INEW;
			ip->i_flags |= XFS_IRECLAIMABLE;
			__xfs_inode_set_reclaim_tag(pag, ip);

		inode->i_state = I_LOCK|I_NEW;

		/* If the VFS inode is being torn down, pause and try again. */

		/* We've got a live one. */
		spin_unlock(&ip->i_flags_lock);
		read_unlock(&pag->pag_ici_lock);

		xfs_ilock(ip, lock_flags);

	xfs_iflags_clear(ip, XFS_ISTALE);
	xfs_itrace_exit_tag(ip, "xfs_iget.found");
	XFS_STATS_INC(xs_ig_found);

	spin_unlock(&ip->i_flags_lock);
	read_unlock(&pag->pag_ici_lock);

	struct xfs_mount	*mp,
	struct xfs_perag	*pag,
	struct xfs_inode	**ipp,
	int			lock_flags) __releases(pag->pag_ici_lock)

	struct xfs_inode	*ip;
	unsigned long		first_index, mask;
	xfs_agino_t		agino = XFS_INO_TO_AGINO(mp, ino);

	ip = xfs_inode_alloc(mp, ino);

	error = xfs_iread(mp, tp, ip, bno, flags);

	xfs_itrace_exit_tag(ip, "xfs_iget.alloc");

	if ((ip->i_d.di_mode == 0) && !(flags & XFS_IGET_CREATE)) {
	 * Preload the radix tree so we can insert safely under the
	 * write spinlock.  Note that we cannot sleep inside the preload
	 * region.
	if (radix_tree_preload(GFP_KERNEL)) {

	 * Because the inode hasn't been added to the radix-tree yet it can't
	 * be found by another thread, so we can do the non-sleeping lock here.
		if (!xfs_ilock_nowait(ip, lock_flags))

	mask = ~(((XFS_INODE_CLUSTER_SIZE(mp) >> mp->m_sb.sb_inodelog)) - 1);
	first_index = agino & mask;
	write_lock(&pag->pag_ici_lock);

	/* insert the new inode */
	error = radix_tree_insert(&pag->pag_ici_root, agino, ip);
	if (unlikely(error)) {
		WARN_ON(error != -EEXIST);
		XFS_STATS_INC(xs_ig_dup);
		goto out_preload_end;

	/* These values _must_ be set before releasing the radix tree lock! */
	ip->i_udquot = ip->i_gdquot = NULL;
	xfs_iflags_set(ip, XFS_INEW);

	write_unlock(&pag->pag_ici_lock);
	radix_tree_preload_end();

	write_unlock(&pag->pag_ici_lock);
	radix_tree_preload_end();
		xfs_iunlock(ip, lock_flags);

	__destroy_inode(VFS_I(ip));
 * Look up an inode by number in the given file system.
 * The inode is looked up in the cache held in each AG.
 * If the inode is found in the cache, initialise the vfs inode
 * if necessary.
 *
 * If it is not in core, read it in from the file system's device,
 * add it to the cache and initialise the vfs inode.
 *
 * The inode is locked according to the value of the lock_flags parameter.
 * This flag parameter indicates how and if the inode's IO lock and inode lock
 * should be taken.
 *
 * mp -- the mount point structure for the current file system.  It points
 *       to the inode hash table.
 * tp -- a pointer to the current transaction if there is one.  This is
 *       simply passed through to the xfs_iread() call.
 * ino -- the number of the inode desired.  This is the unique identifier
 *        within the file system for the inode being requested.
 * lock_flags -- flags indicating how to lock the inode.  See the comment
 *        for xfs_ilock() for a list of valid values.
 * bno -- the block number starting the buffer containing the inode,
 *        if known (as by bulkstat), else 0.
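 *
 * Illustrative call (a sketch only: the function's signature is not part of
 * this excerpt, so the argument order mp, tp, ino, flags, lock_flags, ipp,
 * bno is assumed, and "ip"/"error" are hypothetical caller locals):
 *
 *	error = xfs_iget(mp, tp, ino, 0, XFS_ILOCK_SHARED, &ip, 0);
 *	if (!error) {
 *		... use the referenced, locked inode ...
 *		xfs_iput(ip, XFS_ILOCK_SHARED);
 *	}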
	/* the radix tree exists only in inode capable AGs */
	if (XFS_INO_TO_AGNO(mp, ino) >= mp->m_maxagi)

	/* get the perag structure and ensure that it's inode capable */
	pag = xfs_get_perag(mp, ino);
	if (!pag->pagi_inodeok)

	ASSERT(pag->pag_ici_init);
	agino = XFS_INO_TO_AGINO(mp, ino);

	read_lock(&pag->pag_ici_lock);
	ip = radix_tree_lookup(&pag->pag_ici_root, agino);

		error = xfs_iget_cache_hit(pag, ip, flags, lock_flags);
			goto out_error_or_again;

		read_unlock(&pag->pag_ici_lock);
		XFS_STATS_INC(xs_ig_missed);

		error = xfs_iget_cache_miss(mp, pag, tp, ino, &ip, bno,
							flags, lock_flags);
			goto out_error_or_again;

	xfs_put_perag(mp, pag);

	ASSERT(ip->i_df.if_ext_max ==
	       XFS_IFORK_DSIZE(ip) / sizeof(xfs_bmbt_rec_t));

	 * If we have a real type for an on-disk inode, we can set ops(&unlock)
	 * now.  If it's a new inode being created, xfs_ialloc will handle it.
	if (xfs_iflags_test(ip, XFS_INEW) && ip->i_d.di_mode != 0)

	if (error == EAGAIN) {
	xfs_put_perag(mp, pag);
 * Decrement reference count of an inode structure and unlock it.
 *
 * ip -- the inode being released
 * lock_flags -- this parameter indicates the inode's locks to be
 *	 released.  See the comment on xfs_iunlock() for a list
 *	 of valid values for this parameter.
xfs_iput(xfs_inode_t	*ip,
	xfs_itrace_entry(ip);
	xfs_iunlock(ip, lock_flags);

 * Special iput for brand-new inodes that are still locked

	struct inode	*inode = VFS_I(ip);

	xfs_itrace_entry(ip);

	if (ip->i_d.di_mode == 0) {
		ASSERT(!xfs_iflags_test(ip, XFS_IRECLAIMABLE));
		make_bad_inode(inode);

	if (inode->i_state & I_NEW)
		unlock_new_inode(inode);

		xfs_iunlock(ip, lock_flags);
 * This is called to free all the memory associated with an inode.
 * It must free the inode itself and any buffers allocated for
 * if_extents/if_data and if_broot.  It must also free the lock
 * associated with the inode.
 *
 * Note: because we don't initialise everything on reallocation out
 * of the zone, we must ensure we nullify everything correctly before
 * freeing the structure.
	struct xfs_inode	*ip)

	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_perag	*pag;

	XFS_STATS_INC(xs_ig_reclaims);

	 * Remove the inode from the per-AG radix tree.  It doesn't matter
	 * if it was never added to it because radix_tree_delete can deal
	 * with that case just fine.
	pag = xfs_get_perag(mp, ip->i_ino);
	write_lock(&pag->pag_ici_lock);
	radix_tree_delete(&pag->pag_ici_root, XFS_INO_TO_AGINO(mp, ip->i_ino));
	write_unlock(&pag->pag_ici_lock);
	xfs_put_perag(mp, pag);
	 * Here we do an (almost) spurious inode lock in order to coordinate
	 * with inode cache radix tree lookups.  This is because the lookup
	 * can reference the inodes in the cache without taking references.
	 *
	 * We make that OK here by ensuring that we wait until the inode is
	 * unlocked after the lookup before we go ahead and free it.  We get
	 * both the ilock and the iolock because the code may need to drop the
	 * ilock but will still hold the iolock.
	xfs_ilock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
	xfs_iunlock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
 * This is a wrapper routine around the xfs_ilock() routine
 * used to centralize some grungy code.  It is used in places
 * that wish to lock the inode solely for reading the extents.
 * The reason these places can't just call xfs_ilock(SHARED)
 * is that the inode lock also guards the bringing in of the
 * extents from disk for a file in b-tree format.  If the inode
 * is in b-tree format, then we need to lock the inode exclusively
 * until the extents are read in.  Locking it exclusively all
 * the time would limit our parallelism unnecessarily, though.
 * What we do instead is check to see if the extents have been
 * read in yet, and only lock the inode exclusively if they
 * have not.
 *
 * The function returns a value which should be given to the
 * corresponding xfs_iunlock_map_shared().  This value is
 * the mode in which the lock was actually taken.
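 *
 * Illustrative pairing (a sketch; "lock_mode" is a hypothetical caller local):
 *
 *	uint	lock_mode;
 *
 *	lock_mode = xfs_ilock_map_shared(ip);
 *	... walk the in-core extent list ...
 *	xfs_iunlock_map_shared(ip, lock_mode);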
xfs_ilock_map_shared(
	if ((ip->i_d.di_format == XFS_DINODE_FMT_BTREE) &&
	    ((ip->i_df.if_flags & XFS_IFEXTENTS) == 0)) {
		lock_mode = XFS_ILOCK_EXCL;
	} else {
		lock_mode = XFS_ILOCK_SHARED;
	}

	xfs_ilock(ip, lock_mode);

 * This is simply the unlock routine to go with xfs_ilock_map_shared().
 * All it does is call xfs_iunlock() with the given lock_mode.
xfs_iunlock_map_shared(
	unsigned int	lock_mode)

	xfs_iunlock(ip, lock_mode);
 * The xfs inode contains 2 locks: a multi-reader lock called the
 * i_iolock and a multi-reader lock called the i_lock.  This routine
 * allows either or both of the locks to be obtained.
 *
 * The 2 locks should always be ordered so that the IO lock is
 * obtained first in order to prevent deadlock.
 *
 * ip -- the inode being locked
 * lock_flags -- this parameter indicates the inode's locks
 *       to be locked.  It can be:
 *		XFS_IOLOCK_SHARED,
 *		XFS_IOLOCK_EXCL,
 *		XFS_ILOCK_SHARED,
 *		XFS_ILOCK_EXCL,
 *		XFS_IOLOCK_SHARED | XFS_ILOCK_SHARED,
 *		XFS_IOLOCK_SHARED | XFS_ILOCK_EXCL,
 *		XFS_IOLOCK_EXCL | XFS_ILOCK_SHARED,
 *		XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL
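 *
 * Illustrative use (a sketch): a caller that needs both locks exclusively
 * takes them in one call, which honours the IO-lock-first ordering above,
 * and later drops them with the same flags:
 *
 *	xfs_ilock(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
 *	... modify the inode ...
 *	xfs_iunlock(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);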
	 * You can't set both SHARED and EXCL for the same lock,
	 * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
	 * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
	ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
	       (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
	ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
	       (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
	ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_DEP_MASK)) == 0);

	if (lock_flags & XFS_IOLOCK_EXCL)
		mrupdate_nested(&ip->i_iolock, XFS_IOLOCK_DEP(lock_flags));
	else if (lock_flags & XFS_IOLOCK_SHARED)
		mraccess_nested(&ip->i_iolock, XFS_IOLOCK_DEP(lock_flags));

	if (lock_flags & XFS_ILOCK_EXCL)
		mrupdate_nested(&ip->i_lock, XFS_ILOCK_DEP(lock_flags));
	else if (lock_flags & XFS_ILOCK_SHARED)
		mraccess_nested(&ip->i_lock, XFS_ILOCK_DEP(lock_flags));

	xfs_ilock_trace(ip, 1, lock_flags, (inst_t *)__return_address);
 * This is just like xfs_ilock(), except that the caller
 * is guaranteed not to sleep.  It returns 1 if it gets
 * the requested locks and 0 otherwise.  If the IO lock is
 * obtained but the inode lock cannot be, then the IO lock
 * is dropped before returning.
 *
 * ip -- the inode being locked
 * lock_flags -- this parameter indicates the inode's locks to be
 *       locked.  See the comment for xfs_ilock() for a list
 *       of valid values.
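 *
 * Illustrative use (a sketch; the EAGAIN fallback is hypothetical):
 *
 *	if (!xfs_ilock_nowait(ip, XFS_ILOCK_EXCL))
 *		return EAGAIN;
 *	... do the non-blocking work ...
 *	xfs_iunlock(ip, XFS_ILOCK_EXCL);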
	 * You can't set both SHARED and EXCL for the same lock,
	 * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
	 * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
	ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
	       (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
	ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
	       (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
	ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_DEP_MASK)) == 0);

	if (lock_flags & XFS_IOLOCK_EXCL) {
		if (!mrtryupdate(&ip->i_iolock))
	} else if (lock_flags & XFS_IOLOCK_SHARED) {
		if (!mrtryaccess(&ip->i_iolock))

	if (lock_flags & XFS_ILOCK_EXCL) {
		if (!mrtryupdate(&ip->i_lock))
			goto out_undo_iolock;
	} else if (lock_flags & XFS_ILOCK_SHARED) {
		if (!mrtryaccess(&ip->i_lock))
			goto out_undo_iolock;

	xfs_ilock_trace(ip, 2, lock_flags, (inst_t *)__return_address);

	if (lock_flags & XFS_IOLOCK_EXCL)
		mrunlock_excl(&ip->i_iolock);
	else if (lock_flags & XFS_IOLOCK_SHARED)
		mrunlock_shared(&ip->i_iolock);
 * xfs_iunlock() is used to drop the inode locks acquired with
 * xfs_ilock() and xfs_ilock_nowait().  The caller must pass
 * in the flags given to xfs_ilock() or xfs_ilock_nowait() so
 * that we know which locks to drop.
 *
 * ip -- the inode being unlocked
 * lock_flags -- this parameter indicates the inode's locks to be
 *       unlocked.  See the comment for xfs_ilock() for a list
 *       of valid values for this parameter.
	 * You can't set both SHARED and EXCL for the same lock,
	 * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
	 * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
	ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
	       (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
	ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
	       (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
	ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_IUNLOCK_NONOTIFY |
			XFS_LOCK_DEP_MASK)) == 0);
	ASSERT(lock_flags != 0);
	if (lock_flags & XFS_IOLOCK_EXCL)
		mrunlock_excl(&ip->i_iolock);
	else if (lock_flags & XFS_IOLOCK_SHARED)
		mrunlock_shared(&ip->i_iolock);

	if (lock_flags & XFS_ILOCK_EXCL)
		mrunlock_excl(&ip->i_lock);
	else if (lock_flags & XFS_ILOCK_SHARED)
		mrunlock_shared(&ip->i_lock);

	if ((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) &&
	    !(lock_flags & XFS_IUNLOCK_NONOTIFY) && ip->i_itemp) {
		 * Let the AIL know that this item has been unlocked in case
		 * it is in the AIL and anyone is waiting on it.  Don't do
		 * this if the caller has asked us not to.
		xfs_trans_unlocked_item(ip->i_itemp->ili_item.li_ailp,
					(xfs_log_item_t*)(ip->i_itemp));

	xfs_ilock_trace(ip, 3, lock_flags, (inst_t *)__return_address);
 * Give up write locks.  The i/o lock cannot be held nested
 * if it is being demoted.
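 *
 * Illustrative use (a sketch): demote an exclusive hold to shared without
 * dropping the lock, then release it as shared:
 *
 *	xfs_ilock(ip, XFS_ILOCK_EXCL);
 *	... exclusive-phase work ...
 *	xfs_ilock_demote(ip, XFS_ILOCK_EXCL);
 *	... shared-phase work ...
 *	xfs_iunlock(ip, XFS_ILOCK_SHARED);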
	ASSERT(lock_flags & (XFS_IOLOCK_EXCL|XFS_ILOCK_EXCL));
	ASSERT((lock_flags & ~(XFS_IOLOCK_EXCL|XFS_ILOCK_EXCL)) == 0);

	if (lock_flags & XFS_ILOCK_EXCL)
		mrdemote(&ip->i_lock);
	if (lock_flags & XFS_IOLOCK_EXCL)
		mrdemote(&ip->i_iolock);
 * Debug-only routine: without additional rw_semaphore APIs, we can
 * now only answer requests regarding whether we hold the lock for write
 * (reader state is outside our visibility; we only track writer state).
 *
 * Note: this means !xfs_isilocked would give false positives, so don't do that.
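 *
 * Illustrative use (a sketch): only assert the positive form, e.g.
 *
 *	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));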
	if ((lock_flags & (XFS_ILOCK_EXCL|XFS_ILOCK_SHARED)) ==
			XFS_ILOCK_EXCL) {
		if (!ip->i_lock.mr_writer)
			return 0;

	if ((lock_flags & (XFS_IOLOCK_EXCL|XFS_IOLOCK_SHARED)) ==
			XFS_IOLOCK_EXCL) {
		if (!ip->i_iolock.mr_writer)
			return 0;
#ifdef XFS_INODE_TRACE

#define KTRACE_ENTER(ip, vk, s, line, ra)	\
	ktrace_enter((ip)->i_trace,	\
/* 0 */	(void *)(__psint_t)(vk),	\
/* 1 */	(void *)(s),	\
/* 2 */	(void *)(__psint_t) line,	\
/* 3 */	(void *)(__psint_t)atomic_read(&VFS_I(ip)->i_count),	\
/* 4 */	(void *)(ra),	\
/* 6 */	(void *)(__psint_t)current_cpu(),	\
/* 7 */	(void *)(__psint_t)current_pid(),	\
/* 8 */	(void *)__return_address,	\
/* 9 */	NULL, NULL, NULL, NULL, NULL, NULL, NULL)
 * Vnode tracing code.
_xfs_itrace_entry(xfs_inode_t *ip, const char *func, inst_t *ra)
	KTRACE_ENTER(ip, INODE_KTRACE_ENTRY, func, 0, ra);

_xfs_itrace_exit(xfs_inode_t *ip, const char *func, inst_t *ra)
	KTRACE_ENTER(ip, INODE_KTRACE_EXIT, func, 0, ra);

xfs_itrace_hold(xfs_inode_t *ip, char *file, int line, inst_t *ra)
	KTRACE_ENTER(ip, INODE_KTRACE_HOLD, file, line, ra);

_xfs_itrace_ref(xfs_inode_t *ip, char *file, int line, inst_t *ra)
	KTRACE_ENTER(ip, INODE_KTRACE_REF, file, line, ra);

xfs_itrace_rele(xfs_inode_t *ip, char *file, int line, inst_t *ra)
	KTRACE_ENTER(ip, INODE_KTRACE_RELE, file, line, ra);
#endif	/* XFS_INODE_TRACE */