4 * Complete reimplementation
5 * (C) 1997 Thomas Schoebel-Theuer,
6 * with heavy changes by Linus Torvalds
10 * Notes on the allocation strategy:
12 * The dcache is a master of the icache - whenever a dcache entry
13 * exists, the inode will always exist. "iput()" is done either when
14 * the dcache entry is deleted or garbage collected.
17 #include <linux/syscalls.h>
18 #include <linux/string.h>
21 #include <linux/fsnotify.h>
22 #include <linux/slab.h>
23 #include <linux/init.h>
24 #include <linux/hash.h>
25 #include <linux/cache.h>
26 #include <linux/export.h>
27 #include <linux/mount.h>
28 #include <linux/file.h>
29 #include <asm/uaccess.h>
30 #include <linux/security.h>
31 #include <linux/seqlock.h>
32 #include <linux/swap.h>
33 #include <linux/bootmem.h>
34 #include <linux/fs_struct.h>
35 #include <linux/hardirq.h>
36 #include <linux/bit_spinlock.h>
37 #include <linux/rculist_bl.h>
38 #include <linux/prefetch.h>
39 #include <linux/ratelimit.h>
45 * dcache->d_inode->i_lock protects:
46 * - i_dentry, d_alias, d_inode of aliases
47 * dcache_hash_bucket lock protects:
48 * - the dcache hash table
49 * s_anon bl list spinlock protects:
50 * - the s_anon list (see __d_drop)
51 * dentry->d_sb->s_dentry_lru_lock protects:
52 * - the dcache lru lists and counters
59 * - d_parent and d_subdirs
60 * - childrens' d_child and d_parent
64 * dentry->d_inode->i_lock
66 * dentry->d_sb->s_dentry_lru_lock
67 * dcache_hash_bucket lock
70 * If there is an ancestor relationship:
71 * dentry->d_parent->...->d_parent->d_lock
73 * dentry->d_parent->d_lock
76 * If no ancestor relationship:
77 * if (dentry1 < dentry2)
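/*
 * Illustrative sketch (not part of this file): for two dentries with no
 * ancestor relationship, the rule above orders d_lock acquisition by
 * address so that concurrent lockers cannot deadlock:
 *
 *	if (dentry1 < dentry2) {
 *		spin_lock(&dentry1->d_lock);
 *		spin_lock_nested(&dentry2->d_lock, DENTRY_D_LOCK_NESTED);
 *	} else {
 *		spin_lock(&dentry2->d_lock);
 *		spin_lock_nested(&dentry1->d_lock, DENTRY_D_LOCK_NESTED);
 *	}
 *
 * dentry_lock_for_move() near the end of this file uses the same
 * address-ordering idea when neither dentry is an ancestor of the other.
 */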
81 int sysctl_vfs_cache_pressure __read_mostly = 100;
82 EXPORT_SYMBOL_GPL(sysctl_vfs_cache_pressure);
84 __cacheline_aligned_in_smp DEFINE_SEQLOCK(rename_lock);
86 EXPORT_SYMBOL(rename_lock);
88 static struct kmem_cache *dentry_cache __read_mostly;
91 * read_seqbegin_or_lock - begin a sequence number check or locking block
93 * seq : sequence number to be checked
95 * First try it once optimistically without taking the lock. If that fails,
96 * take the lock. The sequence number is also used as a marker for deciding
97 * whether to be a reader (even) or writer (odd).
98 * N.B. seq must be initialized to an even number to begin with.
100 static inline void read_seqbegin_or_lock(seqlock_t *lock, int *seq)
102 if (!(*seq & 1)) /* Even */
103 *seq = read_seqbegin(lock);
108 static inline int need_seqretry(seqlock_t *lock, int seq)
110 return !(seq & 1) && read_seqretry(lock, seq);
113 static inline void done_seqretry(seqlock_t *lock, int seq)
116 write_sequnlock(lock);
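/*
 * Illustrative sketch of the intended retry pattern (assumed caller,
 * mirroring d_walk() below): start with an even seq so the first pass is a
 * lockless read; if that pass raced with a writer, retry with seq forced
 * odd, which makes read_seqbegin_or_lock() take the lock exclusively.
 *
 *	int seq = 0;
 *again:
 *	read_seqbegin_or_lock(&rename_lock, &seq);
 *	... walk the tree ...
 *	if (need_seqretry(&rename_lock, seq)) {
 *		seq = 1;		// retry as a locked writer
 *		goto again;
 *	}
 *	done_seqretry(&rename_lock, seq);
 */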
120 * This is the single most critical data structure when it comes
121 * to the dcache: the hashtable for lookups. Somebody should try
122 * to make this good - I've just made it work.
124 * This hash-function tries to avoid losing too many bits of hash
125 * information, yet avoid using a prime hash-size or similar.
127 #define D_HASHBITS d_hash_shift
128 #define D_HASHMASK d_hash_mask
130 static unsigned int d_hash_mask __read_mostly;
131 static unsigned int d_hash_shift __read_mostly;
133 static struct hlist_bl_head *dentry_hashtable __read_mostly;
135 static inline struct hlist_bl_head *d_hash(const struct dentry *parent,
138 hash += (unsigned long) parent / L1_CACHE_BYTES;
139 hash = hash + (hash >> D_HASHBITS);
140 return dentry_hashtable + (hash & D_HASHMASK);
143 /* Statistics gathering. */
144 struct dentry_stat_t dentry_stat = {
148 static DEFINE_PER_CPU(long, nr_dentry);
149 static DEFINE_PER_CPU(long, nr_dentry_unused);
151 #if defined(CONFIG_SYSCTL) && defined(CONFIG_PROC_FS)
154 * Here we resort to our own counters instead of using generic per-cpu counters
155 * for consistency with what the vfs inode code does. We expect to get
156 * better code and performance by having our own specialized counters.
158 * Please note that the loop is done over all possible CPUs, not over all online
159 * CPUs. The reason for this is that we don't want to play games with CPUs going
160 * on and off. If one of them goes off, we will just keep their counters.
162 * glommer: See cffbc8a for details, and if you ever intend to change this,
163 * please update all vfs counters to match.
165 static long get_nr_dentry(void)
169 for_each_possible_cpu(i)
170 sum += per_cpu(nr_dentry, i);
171 return sum < 0 ? 0 : sum;
174 static long get_nr_dentry_unused(void)
178 for_each_possible_cpu(i)
179 sum += per_cpu(nr_dentry_unused, i);
180 return sum < 0 ? 0 : sum;
183 int proc_nr_dentry(ctl_table *table, int write, void __user *buffer,
184 size_t *lenp, loff_t *ppos)
186 dentry_stat.nr_dentry = get_nr_dentry();
187 dentry_stat.nr_unused = get_nr_dentry_unused();
188 return proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
193 * Compare 2 name strings, return 0 if they match, otherwise non-zero.
194 * The strings are both count bytes long, and count is non-zero.
196 #ifdef CONFIG_DCACHE_WORD_ACCESS
198 #include <asm/word-at-a-time.h>
200 * NOTE! 'cs' comes from a dentry, so it has an aligned
201 * allocation for this particular component. We don't
202 * strictly need the load_unaligned_zeropad() safety, but it
203 * doesn't hurt either.
205 * In contrast, 'ct' and 'tcount' can be from a pathname, and do
206 * need the careful unaligned handling.
208 static inline int dentry_string_cmp(const unsigned char *cs, const unsigned char *ct, unsigned tcount)
210 unsigned long a,b,mask;
213 a = *(unsigned long *)cs;
214 b = load_unaligned_zeropad(ct);
215 if (tcount < sizeof(unsigned long))
217 if (unlikely(a != b))
219 cs += sizeof(unsigned long);
220 ct += sizeof(unsigned long);
221 tcount -= sizeof(unsigned long);
225 mask = ~(~0ul << tcount*8);
226 return unlikely(!!((a ^ b) & mask));
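/*
 * Worked example (64-bit little-endian): with tcount == 3 bytes left,
 * tcount*8 == 24 and
 *
 *	~0ul << 24		== 0xffffffffff000000
 *	mask = ~(~0ul << 24)	== 0x0000000000ffffff
 *
 * so (a ^ b) & mask is non-zero exactly when the two names differ within
 * the remaining three bytes; anything loaded beyond tcount is masked off.
 */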
231 static inline int dentry_string_cmp(const unsigned char *cs, const unsigned char *ct, unsigned tcount)
245 static inline int dentry_cmp(const struct dentry *dentry, const unsigned char *ct, unsigned tcount)
247 const unsigned char *cs;
249 * Be careful about RCU walk racing with rename:
250 * use ACCESS_ONCE to fetch the name pointer.
252 * NOTE! Even if a rename will mean that the length
253 * was not loaded atomically, we don't care. The
254 * RCU walk will check the sequence count eventually,
255 * and catch it. And we won't overrun the buffer,
256 * because we're reading the name pointer atomically,
257 * and a dentry name is guaranteed to be properly
258 * terminated with a NUL byte.
260 * End result: even if 'len' is wrong, we'll exit
261 * early because the data cannot match (there can
262 * be no NUL in the ct/tcount data)
264 cs = ACCESS_ONCE(dentry->d_name.name);
265 smp_read_barrier_depends();
266 return dentry_string_cmp(cs, ct, tcount);
269 static void __d_free(struct rcu_head *head)
271 struct dentry *dentry = container_of(head, struct dentry, d_u.d_rcu);
273 WARN_ON(!hlist_unhashed(&dentry->d_alias));
274 if (dname_external(dentry))
275 kfree(dentry->d_name.name);
276 kmem_cache_free(dentry_cache, dentry);
282 static void d_free(struct dentry *dentry)
284 BUG_ON((int)dentry->d_lockref.count > 0);
285 this_cpu_dec(nr_dentry);
286 if (dentry->d_op && dentry->d_op->d_release)
287 dentry->d_op->d_release(dentry);
289 /* if dentry was never visible to RCU, immediate free is OK */
290 if (!(dentry->d_flags & DCACHE_RCUACCESS))
291 __d_free(&dentry->d_u.d_rcu);
293 call_rcu(&dentry->d_u.d_rcu, __d_free);
297 * dentry_rcuwalk_barrier - invalidate in-progress rcu-walk lookups
298 * @dentry: the target dentry
299 * After this call, in-progress rcu-walk path lookup will fail. This
300 * should be called after unhashing, and after changing d_inode (if
301 * the dentry has not already been unhashed).
303 static inline void dentry_rcuwalk_barrier(struct dentry *dentry)
305 assert_spin_locked(&dentry->d_lock);
306 /* Go through a barrier */
307 write_seqcount_barrier(&dentry->d_seq);
311 * Release the dentry's inode, using the filesystem
312 * d_iput() operation if defined. Dentry has no refcount
315 static void dentry_iput(struct dentry * dentry)
316 __releases(dentry->d_lock)
317 __releases(dentry->d_inode->i_lock)
319 struct inode *inode = dentry->d_inode;
321 dentry->d_inode = NULL;
322 hlist_del_init(&dentry->d_alias);
323 spin_unlock(&dentry->d_lock);
324 spin_unlock(&inode->i_lock);
326 fsnotify_inoderemove(inode);
327 if (dentry->d_op && dentry->d_op->d_iput)
328 dentry->d_op->d_iput(dentry, inode);
332 spin_unlock(&dentry->d_lock);
337 * Release the dentry's inode, using the filesystem
338 * d_iput() operation if defined. dentry remains in-use.
340 static void dentry_unlink_inode(struct dentry * dentry)
341 __releases(dentry->d_lock)
342 __releases(dentry->d_inode->i_lock)
344 struct inode *inode = dentry->d_inode;
345 dentry->d_inode = NULL;
346 hlist_del_init(&dentry->d_alias);
347 dentry_rcuwalk_barrier(dentry);
348 spin_unlock(&dentry->d_lock);
349 spin_unlock(&inode->i_lock);
351 fsnotify_inoderemove(inode);
352 if (dentry->d_op && dentry->d_op->d_iput)
353 dentry->d_op->d_iput(dentry, inode);
359 * dentry_lru_(add|del|move_list) must be called with d_lock held.
361 static void dentry_lru_add(struct dentry *dentry)
363 if (unlikely(!(dentry->d_flags & DCACHE_LRU_LIST))) {
364 spin_lock(&dentry->d_sb->s_dentry_lru_lock);
365 dentry->d_flags |= DCACHE_LRU_LIST;
366 list_add(&dentry->d_lru, &dentry->d_sb->s_dentry_lru);
367 dentry->d_sb->s_nr_dentry_unused++;
368 this_cpu_inc(nr_dentry_unused);
369 spin_unlock(&dentry->d_sb->s_dentry_lru_lock);
373 static void __dentry_lru_del(struct dentry *dentry)
375 list_del_init(&dentry->d_lru);
376 dentry->d_flags &= ~DCACHE_LRU_LIST;
377 dentry->d_sb->s_nr_dentry_unused--;
378 this_cpu_dec(nr_dentry_unused);
382 * Remove a dentry with references from the LRU.
384 * If we are on the shrink list, then we can get to try_prune_one_dentry() and
385 * lose our last reference through the parent walk. In this case, we need to
386 * remove ourselves from the shrink list, not the LRU.
388 static void dentry_lru_del(struct dentry *dentry)
390 if (dentry->d_flags & DCACHE_SHRINK_LIST) {
391 list_del_init(&dentry->d_lru);
392 dentry->d_flags &= ~DCACHE_SHRINK_LIST;
396 if (!list_empty(&dentry->d_lru)) {
397 spin_lock(&dentry->d_sb->s_dentry_lru_lock);
398 __dentry_lru_del(dentry);
399 spin_unlock(&dentry->d_sb->s_dentry_lru_lock);
403 static void dentry_lru_move_list(struct dentry *dentry, struct list_head *list)
405 BUG_ON(dentry->d_flags & DCACHE_SHRINK_LIST);
407 spin_lock(&dentry->d_sb->s_dentry_lru_lock);
408 if (list_empty(&dentry->d_lru)) {
409 dentry->d_flags |= DCACHE_LRU_LIST;
410 list_add_tail(&dentry->d_lru, list);
412 list_move_tail(&dentry->d_lru, list);
413 dentry->d_sb->s_nr_dentry_unused--;
414 this_cpu_dec(nr_dentry_unused);
416 spin_unlock(&dentry->d_sb->s_dentry_lru_lock);
420 * d_kill - kill dentry and return parent
421 * @dentry: dentry to kill
422 * @parent: parent dentry
424 * The dentry must already be unhashed and removed from the LRU.
426 * If this is the root of the dentry tree, return NULL.
428 * dentry->d_lock and parent->d_lock must be held by caller, and are dropped by
431 static struct dentry *d_kill(struct dentry *dentry, struct dentry *parent)
432 __releases(dentry->d_lock)
433 __releases(parent->d_lock)
434 __releases(dentry->d_inode->i_lock)
436 list_del(&dentry->d_u.d_child);
438 * Inform try_to_ascend() that we are no longer attached to the
441 dentry->d_flags |= DCACHE_DENTRY_KILLED;
443 spin_unlock(&parent->d_lock);
446 * dentry_iput drops the locks, at which point nobody (except
447 * transient RCU lookups) can reach this dentry.
454 * Unhash a dentry without inserting an RCU walk barrier or checking that
455 * dentry->d_lock is locked. The caller must take care of that, if
458 static void __d_shrink(struct dentry *dentry)
460 if (!d_unhashed(dentry)) {
461 struct hlist_bl_head *b;
462 if (unlikely(dentry->d_flags & DCACHE_DISCONNECTED))
463 b = &dentry->d_sb->s_anon;
465 b = d_hash(dentry->d_parent, dentry->d_name.hash);
468 __hlist_bl_del(&dentry->d_hash);
469 dentry->d_hash.pprev = NULL;
475 * d_drop - drop a dentry
476 * @dentry: dentry to drop
478 * d_drop() unhashes the entry from the parent dentry hashes, so that it won't
479 * be found through a VFS lookup any more. Note that this is different from
480 * deleting the dentry - d_delete will try to mark the dentry negative if
481 * possible, giving a successful _negative_ lookup, while d_drop will
482 * just make the cache lookup fail.
484 * d_drop() is used mainly for stuff that wants to invalidate a dentry for some
485 * reason (NFS timeouts or autofs deletes).
487 * __d_drop requires dentry->d_lock.
489 void __d_drop(struct dentry *dentry)
491 if (!d_unhashed(dentry)) {
493 dentry_rcuwalk_barrier(dentry);
496 EXPORT_SYMBOL(__d_drop);
498 void d_drop(struct dentry *dentry)
500 spin_lock(&dentry->d_lock);
502 spin_unlock(&dentry->d_lock);
504 EXPORT_SYMBOL(d_drop);
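/*
 * Illustrative sketch (assumed caller, not from this file): a filesystem
 * noticing a stale entry during ->d_revalidate() can unhash it so the next
 * lookup misses the dcache and goes back to the filesystem:
 *
 *	static int examplefs_d_revalidate(struct dentry *dentry, unsigned int flags)
 *	{
 *		if (examplefs_entry_is_stale(dentry)) {
 *			d_drop(dentry);
 *			return 0;	// invalid: VFS will look it up again
 *		}
 *		return 1;		// still valid
 *	}
 *
 * examplefs_entry_is_stale() is a hypothetical helper; NFS timeouts and
 * autofs expiry use this kind of pattern, as noted above.
 */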
507 * Finish off a dentry we've decided to kill.
508 * dentry->d_lock must be held, returns with it unlocked.
509 * If unlock_on_failure is non-zero, dentry->d_lock is dropped again before
509 * returning when a trylock fails.
510 * Returns dentry requiring refcount drop, or NULL if we're done.
512 static inline struct dentry *
513 dentry_kill(struct dentry *dentry, int unlock_on_failure)
514 __releases(dentry->d_lock)
517 struct dentry *parent;
519 inode = dentry->d_inode;
520 if (inode && !spin_trylock(&inode->i_lock)) {
522 if (unlock_on_failure) {
523 spin_unlock(&dentry->d_lock);
526 return dentry; /* try again with same dentry */
531 parent = dentry->d_parent;
532 if (parent && !spin_trylock(&parent->d_lock)) {
534 spin_unlock(&inode->i_lock);
539 * The dentry is now unrecoverably dead to the world.
541 lockref_mark_dead(&dentry->d_lockref);
544 * inform the fs via d_prune that this dentry is about to be
545 * unhashed and destroyed.
547 if ((dentry->d_flags & DCACHE_OP_PRUNE) && !d_unhashed(dentry))
548 dentry->d_op->d_prune(dentry);
550 dentry_lru_del(dentry);
551 /* if it was on the hash then remove it */
553 return d_kill(dentry, parent);
559 * This is complicated by the fact that we do not want to put
560 * dentries that are no longer on any hash chain on the unused
561 * list: we'd much rather just get rid of them immediately.
563 * However, that implies that we have to traverse the dentry
564 * tree upwards to the parents which might _also_ now be
565 * scheduled for deletion (it may have been only waiting for
566 * its last child to go away).
568 * This tail recursion is done by hand as we don't want to depend
569 * on the compiler to always get this right (gcc generally doesn't).
570 * Real recursion would eat up our stack space.
574 * dput - release a dentry
575 * @dentry: dentry to release
577 * Release a dentry. This will drop the usage count and if appropriate
578 * call the dentry unlink method as well as removing it from the queues and
579 * releasing its resources. If the parent dentries were scheduled for release
580 * they too may now get deleted.
582 void dput(struct dentry *dentry)
584 if (unlikely(!dentry))
588 if (lockref_put_or_lock(&dentry->d_lockref))
591 /* Unreachable? Get rid of it */
592 if (unlikely(d_unhashed(dentry)))
595 if (unlikely(dentry->d_flags & DCACHE_OP_DELETE)) {
596 if (dentry->d_op->d_delete(dentry))
600 dentry->d_flags |= DCACHE_REFERENCED;
601 dentry_lru_add(dentry);
603 dentry->d_lockref.count--;
604 spin_unlock(&dentry->d_lock);
608 dentry = dentry_kill(dentry, 1);
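/*
 * Illustrative sketch (assumed caller): every reference obtained with
 * dget()/dget_parent() or returned by a lookup must eventually be balanced
 * with dput(), which may tear down the dentry and unused ancestors as
 * described above:
 *
 *	struct dentry *child = d_lookup(parent, &name);
 *	if (child) {
 *		... use child ...
 *		dput(child);	// drop the reference d_lookup() took
 *	}
 */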
615 * d_invalidate - invalidate a dentry
616 * @dentry: dentry to invalidate
618 * Try to invalidate the dentry if it turns out to be
619 * possible. If there are other dentries that can be
620 * reached through this one we can't delete it and we
621 * return -EBUSY. On success we return 0.
626 int d_invalidate(struct dentry * dentry)
629 * If it's already been dropped, return OK.
631 spin_lock(&dentry->d_lock);
632 if (d_unhashed(dentry)) {
633 spin_unlock(&dentry->d_lock);
637 * Check whether to do a partial shrink_dcache
638 * to get rid of unused child entries.
640 if (!list_empty(&dentry->d_subdirs)) {
641 spin_unlock(&dentry->d_lock);
642 shrink_dcache_parent(dentry);
643 spin_lock(&dentry->d_lock);
647 * Somebody else still using it?
649 * If it's a directory, we can't drop it
650 * for fear of somebody re-populating it
651 * with children (even though dropping it
652 * would make it unreachable from the root,
653 * we might still populate it if it was a
654 * working directory or similar).
655 * We also need to leave mountpoints alone,
658 if (dentry->d_lockref.count > 1 && dentry->d_inode) {
659 if (S_ISDIR(dentry->d_inode->i_mode) || d_mountpoint(dentry)) {
660 spin_unlock(&dentry->d_lock);
666 spin_unlock(&dentry->d_lock);
669 EXPORT_SYMBOL(d_invalidate);
671 /* This must be called with d_lock held */
672 static inline void __dget_dlock(struct dentry *dentry)
674 dentry->d_lockref.count++;
677 static inline void __dget(struct dentry *dentry)
679 lockref_get(&dentry->d_lockref);
682 struct dentry *dget_parent(struct dentry *dentry)
688 * Do optimistic parent lookup without any
692 ret = ACCESS_ONCE(dentry->d_parent);
693 gotref = lockref_get_not_zero(&ret->d_lockref);
695 if (likely(gotref)) {
696 if (likely(ret == ACCESS_ONCE(dentry->d_parent)))
703 * Don't need rcu_dereference because we re-check it was correct under
707 ret = dentry->d_parent;
708 spin_lock(&ret->d_lock);
709 if (unlikely(ret != dentry->d_parent)) {
710 spin_unlock(&ret->d_lock);
715 BUG_ON(!ret->d_lockref.count);
716 ret->d_lockref.count++;
717 spin_unlock(&ret->d_lock);
720 EXPORT_SYMBOL(dget_parent);
723 * d_find_alias - grab a hashed alias of inode
724 * @inode: inode in question
725 * @want_discon: flag, used by d_splice_alias, to request
726 * that only a DISCONNECTED alias be returned.
728 * If inode has a hashed alias, or is a directory and has any alias,
729 * acquire the reference to alias and return it. Otherwise return NULL.
730 * Notice that if inode is a directory there can be only one alias and
731 * it can be unhashed only if it has no children, or if it is the root
734 * If the inode has an IS_ROOT, DCACHE_DISCONNECTED alias, then prefer
735 * any other hashed alias over that one unless @want_discon is set,
736 * in which case only return an IS_ROOT, DCACHE_DISCONNECTED alias.
738 static struct dentry *__d_find_alias(struct inode *inode, int want_discon)
740 struct dentry *alias, *discon_alias;
744 hlist_for_each_entry(alias, &inode->i_dentry, d_alias) {
745 spin_lock(&alias->d_lock);
746 if (S_ISDIR(inode->i_mode) || !d_unhashed(alias)) {
747 if (IS_ROOT(alias) &&
748 (alias->d_flags & DCACHE_DISCONNECTED)) {
749 discon_alias = alias;
750 } else if (!want_discon) {
752 spin_unlock(&alias->d_lock);
756 spin_unlock(&alias->d_lock);
759 alias = discon_alias;
760 spin_lock(&alias->d_lock);
761 if (S_ISDIR(inode->i_mode) || !d_unhashed(alias)) {
762 if (IS_ROOT(alias) &&
763 (alias->d_flags & DCACHE_DISCONNECTED)) {
765 spin_unlock(&alias->d_lock);
769 spin_unlock(&alias->d_lock);
775 struct dentry *d_find_alias(struct inode *inode)
777 struct dentry *de = NULL;
779 if (!hlist_empty(&inode->i_dentry)) {
780 spin_lock(&inode->i_lock);
781 de = __d_find_alias(inode, 0);
782 spin_unlock(&inode->i_lock);
786 EXPORT_SYMBOL(d_find_alias);
789 * Try to kill dentries associated with this inode.
790 * WARNING: you must own a reference to inode.
792 void d_prune_aliases(struct inode *inode)
794 struct dentry *dentry;
796 spin_lock(&inode->i_lock);
797 hlist_for_each_entry(dentry, &inode->i_dentry, d_alias) {
798 spin_lock(&dentry->d_lock);
799 if (!dentry->d_lockref.count) {
801 * inform the fs via d_prune that this dentry
802 * is about to be unhashed and destroyed.
804 if ((dentry->d_flags & DCACHE_OP_PRUNE) &&
806 dentry->d_op->d_prune(dentry);
808 __dget_dlock(dentry);
810 spin_unlock(&dentry->d_lock);
811 spin_unlock(&inode->i_lock);
815 spin_unlock(&dentry->d_lock);
817 spin_unlock(&inode->i_lock);
819 EXPORT_SYMBOL(d_prune_aliases);
822 * Try to throw away a dentry - free the inode, dput the parent.
823 * Requires dentry->d_lock is held, and dentry->d_count == 0.
824 * Releases dentry->d_lock.
826 * This may fail if locks cannot be acquired; no problem, just try again.
828 static struct dentry * try_prune_one_dentry(struct dentry *dentry)
829 __releases(dentry->d_lock)
831 struct dentry *parent;
833 parent = dentry_kill(dentry, 0);
835 * If dentry_kill returns NULL, we have nothing more to do.
836 * if it returns the same dentry, trylocks failed. In either
837 * case, just loop again.
839 * Otherwise, we need to prune ancestors too. This is necessary
840 * to prevent quadratic behavior of shrink_dcache_parent(), but
841 * is also expected to be beneficial in reducing dentry cache
846 if (parent == dentry)
849 /* Prune ancestors. */
852 if (lockref_put_or_lock(&dentry->d_lockref))
854 dentry = dentry_kill(dentry, 1);
859 static void shrink_dentry_list(struct list_head *list)
861 struct dentry *dentry;
865 dentry = list_entry_rcu(list->prev, struct dentry, d_lru);
866 if (&dentry->d_lru == list)
868 spin_lock(&dentry->d_lock);
869 if (dentry != list_entry(list->prev, struct dentry, d_lru)) {
870 spin_unlock(&dentry->d_lock);
875 * The dispose list is isolated and dentries are not accounted
876 * to the LRU here, so we can simply remove it from the list
877 * here regardless of whether it is referenced or not.
879 list_del_init(&dentry->d_lru);
880 dentry->d_flags &= ~DCACHE_SHRINK_LIST;
883 * We found an inuse dentry which was not removed from
884 * the LRU because of laziness during lookup. Do not free it.
886 if (dentry->d_lockref.count) {
887 spin_unlock(&dentry->d_lock);
892 dentry = try_prune_one_dentry(dentry);
896 dentry->d_flags |= DCACHE_SHRINK_LIST;
897 list_add(&dentry->d_lru, list);
898 spin_unlock(&dentry->d_lock);
905 * prune_dcache_sb - shrink the dcache
907 * @count: number of entries to try to free
909 * Attempt to shrink the superblock dcache LRU by @count entries. This is
910 * done when we need more memory and is called from the superblock shrinker
913 * This function may fail to free any resources if all the dentries are in
916 void prune_dcache_sb(struct super_block *sb, int count)
918 struct dentry *dentry;
919 LIST_HEAD(referenced);
923 spin_lock(&sb->s_dentry_lru_lock);
924 while (!list_empty(&sb->s_dentry_lru)) {
925 dentry = list_entry(sb->s_dentry_lru.prev,
926 struct dentry, d_lru);
927 BUG_ON(dentry->d_sb != sb);
929 if (!spin_trylock(&dentry->d_lock)) {
930 spin_unlock(&sb->s_dentry_lru_lock);
935 if (dentry->d_flags & DCACHE_REFERENCED) {
936 dentry->d_flags &= ~DCACHE_REFERENCED;
937 list_move(&dentry->d_lru, &referenced);
938 spin_unlock(&dentry->d_lock);
940 list_move(&dentry->d_lru, &tmp);
941 dentry->d_flags |= DCACHE_SHRINK_LIST;
942 this_cpu_dec(nr_dentry_unused);
943 sb->s_nr_dentry_unused--;
944 spin_unlock(&dentry->d_lock);
948 cond_resched_lock(&sb->s_dentry_lru_lock);
950 if (!list_empty(&referenced))
951 list_splice(&referenced, &sb->s_dentry_lru);
952 spin_unlock(&sb->s_dentry_lru_lock);
954 shrink_dentry_list(&tmp);
958 * Mark all the dentries as being on the dispose list so we don't think they are
959 * still on the LRU if we try to kill them from ascending the parent chain in
960 * try_prune_one_dentry() rather than directly from the dispose list.
964 struct list_head *dispose)
966 struct dentry *dentry;
969 list_for_each_entry_rcu(dentry, dispose, d_lru) {
970 spin_lock(&dentry->d_lock);
971 dentry->d_flags |= DCACHE_SHRINK_LIST;
972 spin_unlock(&dentry->d_lock);
975 shrink_dentry_list(dispose);
979 * shrink_dcache_sb - shrink dcache for a superblock
982 * Shrink the dcache for the specified super block. This is used to free
983 * the dcache before unmounting a file system.
985 void shrink_dcache_sb(struct super_block *sb)
989 spin_lock(&sb->s_dentry_lru_lock);
990 while (!list_empty(&sb->s_dentry_lru)) {
992 * account for removal here so we don't need to handle it later
993 * even though the dentry is no longer on the lru list.
995 list_splice_init(&sb->s_dentry_lru, &tmp);
996 this_cpu_sub(nr_dentry_unused, sb->s_nr_dentry_unused);
997 sb->s_nr_dentry_unused = 0;
998 spin_unlock(&sb->s_dentry_lru_lock);
1000 shrink_dcache_list(&tmp);
1002 spin_lock(&sb->s_dentry_lru_lock);
1004 spin_unlock(&sb->s_dentry_lru_lock);
1006 EXPORT_SYMBOL(shrink_dcache_sb);
1009 * destroy a single subtree of dentries for unmount
1010 * - see the comments on shrink_dcache_for_umount() for a description of the
1013 static void shrink_dcache_for_umount_subtree(struct dentry *dentry)
1015 struct dentry *parent;
1017 BUG_ON(!IS_ROOT(dentry));
1020 /* descend to the first leaf in the current subtree */
1021 while (!list_empty(&dentry->d_subdirs))
1022 dentry = list_entry(dentry->d_subdirs.next,
1023 struct dentry, d_u.d_child);
1025 /* consume the dentries from this leaf up through its parents
1026 * until we find one with children or run out altogether */
1028 struct inode *inode;
1031 * inform the fs that this dentry is about to be
1032 * unhashed and destroyed.
1034 if ((dentry->d_flags & DCACHE_OP_PRUNE) &&
1035 !d_unhashed(dentry))
1036 dentry->d_op->d_prune(dentry);
1038 dentry_lru_del(dentry);
1041 if (dentry->d_lockref.count != 0) {
1043 "BUG: Dentry %p{i=%lx,n=%s}"
1044 " still in use (%d)"
1045 " [unmount of %s %s]\n",
1048 dentry->d_inode->i_ino : 0UL,
1049 dentry->d_name.name,
1050 dentry->d_lockref.count,
1051 dentry->d_sb->s_type->name,
1052 dentry->d_sb->s_id);
1056 if (IS_ROOT(dentry)) {
1058 list_del(&dentry->d_u.d_child);
1060 parent = dentry->d_parent;
1061 parent->d_lockref.count--;
1062 list_del(&dentry->d_u.d_child);
1065 inode = dentry->d_inode;
1067 dentry->d_inode = NULL;
1068 hlist_del_init(&dentry->d_alias);
1069 if (dentry->d_op && dentry->d_op->d_iput)
1070 dentry->d_op->d_iput(dentry, inode);
1077 /* finished when we fall off the top of the tree,
1078 * otherwise we ascend to the parent and move to the
1079 * next sibling if there is one */
1083 } while (list_empty(&dentry->d_subdirs));
1085 dentry = list_entry(dentry->d_subdirs.next,
1086 struct dentry, d_u.d_child);
1091 * destroy the dentries attached to a superblock on unmounting
1092 * - we don't need to use dentry->d_lock because:
1093 * - the superblock is detached from all mountings and open files, so the
1094 * dentry trees will not be rearranged by the VFS
1095 * - s_umount is write-locked, so the memory pressure shrinker will ignore
1096 * any dentries belonging to this superblock that it comes across
1097 * - the filesystem itself is no longer permitted to rearrange the dentries
1098 * in this superblock
1100 void shrink_dcache_for_umount(struct super_block *sb)
1102 struct dentry *dentry;
1104 if (down_read_trylock(&sb->s_umount))
1107 dentry = sb->s_root;
1109 dentry->d_lockref.count--;
1110 shrink_dcache_for_umount_subtree(dentry);
1112 while (!hlist_bl_empty(&sb->s_anon)) {
1113 dentry = hlist_bl_entry(hlist_bl_first(&sb->s_anon), struct dentry, d_hash);
1114 shrink_dcache_for_umount_subtree(dentry);
1119 * This tries to ascend one level of parenthood, but
1120 * we can race with renaming, so we need to re-check
1121 * the parenthood after dropping the lock and check
1122 * that the sequence number still matches.
1124 static struct dentry *try_to_ascend(struct dentry *old, unsigned seq)
1126 struct dentry *new = old->d_parent;
1129 spin_unlock(&old->d_lock);
1130 spin_lock(&new->d_lock);
1133 * might go back up the wrong parent if we have had a rename
1136 if (new != old->d_parent ||
1137 (old->d_flags & DCACHE_DENTRY_KILLED) ||
1138 need_seqretry(&rename_lock, seq)) {
1139 spin_unlock(&new->d_lock);
1147 * enum d_walk_ret - action to take during tree walk
1148 * @D_WALK_CONTINUE: continue walk
1149 * @D_WALK_QUIT: quit walk
1150 * @D_WALK_NORETRY: quit when retry is needed
1151 * @D_WALK_SKIP: skip this dentry and its children
1161 * d_walk - walk the dentry tree
1162 * @parent: start of walk
1163 * @data: data passed to @enter() and @finish()
1164 * @enter: callback when first entering the dentry
1165 * @finish: callback when successfully finished the walk
1167 * The @enter() and @finish() callbacks are called with d_lock held.
1169 static void d_walk(struct dentry *parent, void *data,
1170 enum d_walk_ret (*enter)(void *, struct dentry *),
1171 void (*finish)(void *))
1173 struct dentry *this_parent;
1174 struct list_head *next;
1176 enum d_walk_ret ret;
1180 read_seqbegin_or_lock(&rename_lock, &seq);
1181 this_parent = parent;
1182 spin_lock(&this_parent->d_lock);
1184 ret = enter(data, this_parent);
1186 case D_WALK_CONTINUE:
1191 case D_WALK_NORETRY:
1196 next = this_parent->d_subdirs.next;
1198 while (next != &this_parent->d_subdirs) {
1199 struct list_head *tmp = next;
1200 struct dentry *dentry = list_entry(tmp, struct dentry, d_u.d_child);
1203 spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
1205 ret = enter(data, dentry);
1207 case D_WALK_CONTINUE:
1210 spin_unlock(&dentry->d_lock);
1212 case D_WALK_NORETRY:
1216 spin_unlock(&dentry->d_lock);
1220 if (!list_empty(&dentry->d_subdirs)) {
1221 spin_unlock(&this_parent->d_lock);
1222 spin_release(&dentry->d_lock.dep_map, 1, _RET_IP_);
1223 this_parent = dentry;
1224 spin_acquire(&this_parent->d_lock.dep_map, 0, 1, _RET_IP_);
1227 spin_unlock(&dentry->d_lock);
1230 * All done at this level ... ascend and resume the search.
1232 if (this_parent != parent) {
1233 struct dentry *child = this_parent;
1234 this_parent = try_to_ascend(this_parent, seq);
1237 next = child->d_u.d_child.next;
1240 if (need_seqretry(&rename_lock, seq)) {
1241 spin_unlock(&this_parent->d_lock);
1248 spin_unlock(&this_parent->d_lock);
1249 done_seqretry(&rename_lock, seq);
1260 * Search for at least 1 mount point in the dentry's subdirs.
1261 * We descend to the next level whenever the d_subdirs
1262 * list is non-empty and continue searching.
1266 * have_submounts - check for mounts over a dentry
1267 * @parent: dentry to check.
1269 * Return true if the parent or its subdirectories contain
1273 static enum d_walk_ret check_mount(void *data, struct dentry *dentry)
1276 if (d_mountpoint(dentry)) {
1280 return D_WALK_CONTINUE;
1283 int have_submounts(struct dentry *parent)
1287 d_walk(parent, &ret, check_mount, NULL);
1291 EXPORT_SYMBOL(have_submounts);
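/*
 * Illustrative sketch (assumed caller, not from this file): check_mount()
 * above is about the simplest possible d_walk() callback.  A walker that
 * merely counts the dentries in a subtree could look like:
 *
 *	static enum d_walk_ret count_one(void *data, struct dentry *dentry)
 *	{
 *		unsigned long *count = data;
 *
 *		(*count)++;
 *		return D_WALK_CONTINUE;
 *	}
 *
 *	unsigned long count = 0;
 *	d_walk(parent, &count, count_one, NULL);
 *
 * The @enter callback runs with d_lock held, so it must not sleep.
 */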
1294 * Called by mount code to set a mountpoint and check if the mountpoint is
1295 * reachable (e.g. NFS can unhash a directory dentry and then the complete
1296 * subtree can become unreachable).
1298 * Only one of check_submounts_and_drop() and d_set_mounted() must succeed. For
1299 * this reason take rename_lock and d_lock on dentry and ancestors.
1301 int d_set_mounted(struct dentry *dentry)
1305 write_seqlock(&rename_lock);
1306 for (p = dentry->d_parent; !IS_ROOT(p); p = p->d_parent) {
1307 /* Need exclusion wrt. check_submounts_and_drop() */
1308 spin_lock(&p->d_lock);
1309 if (unlikely(d_unhashed(p))) {
1310 spin_unlock(&p->d_lock);
1313 spin_unlock(&p->d_lock);
1315 spin_lock(&dentry->d_lock);
1316 if (!d_unlinked(dentry)) {
1317 dentry->d_flags |= DCACHE_MOUNTED;
1320 spin_unlock(&dentry->d_lock);
1322 write_sequnlock(&rename_lock);
1327 * Search the dentry child list of the specified parent,
1328 * and move any unused dentries to the end of the unused
1329 * list for prune_dcache(). We descend to the next level
1330 * whenever the d_subdirs list is non-empty and continue
1333 * It returns zero iff there are no unused children,
1334 * otherwise it returns the number of children moved to
1335 * the end of the unused list. This may not be the total
1336 * number of unused children, because select_parent can
1337 * drop the lock and return early due to latency
1341 struct select_data {
1342 struct dentry *start;
1343 struct list_head dispose;
1347 static enum d_walk_ret select_collect(void *_data, struct dentry *dentry)
1349 struct select_data *data = _data;
1350 enum d_walk_ret ret = D_WALK_CONTINUE;
1352 if (data->start == dentry)
1356 * move only zero ref count dentries to the dispose list.
1358 * Those which are presently on the shrink list, being processed
1359 * by shrink_dentry_list(), shouldn't be moved. Otherwise the
1360 * loop in shrink_dcache_parent() might not make any progress
1363 if (dentry->d_lockref.count) {
1364 dentry_lru_del(dentry);
1365 } else if (!(dentry->d_flags & DCACHE_SHRINK_LIST)) {
1366 dentry_lru_move_list(dentry, &data->dispose);
1367 dentry->d_flags |= DCACHE_SHRINK_LIST;
1369 ret = D_WALK_NORETRY;
1372 * We can return to the caller if we have found some (this
1373 * ensures forward progress). We'll be coming back to find
1376 if (data->found && need_resched())
1383 * shrink_dcache_parent - prune dcache
1384 * @parent: parent of entries to prune
1386 * Prune the dcache to remove unused children of the parent dentry.
1388 void shrink_dcache_parent(struct dentry *parent)
1391 struct select_data data;
1393 INIT_LIST_HEAD(&data.dispose);
1394 data.start = parent;
1397 d_walk(parent, &data, select_collect, NULL);
1401 shrink_dentry_list(&data.dispose);
1405 EXPORT_SYMBOL(shrink_dcache_parent);
1407 static enum d_walk_ret check_and_collect(void *_data, struct dentry *dentry)
1409 struct select_data *data = _data;
1411 if (d_mountpoint(dentry)) {
1412 data->found = -EBUSY;
1416 return select_collect(_data, dentry);
1419 static void check_and_drop(void *_data)
1421 struct select_data *data = _data;
1423 if (d_mountpoint(data->start))
1424 data->found = -EBUSY;
1426 __d_drop(data->start);
1430 * check_submounts_and_drop - prune dcache, check for submounts and drop
1432 * All done as a single atomic operation relative to has_unlinked_ancestor().
1433 * Returns 0 if successfully unhashed @dentry. If there were submounts then
1436 * @dentry: dentry to prune and drop
1438 int check_submounts_and_drop(struct dentry *dentry)
1442 /* Negative dentries can be dropped without further checks */
1443 if (!dentry->d_inode) {
1449 struct select_data data;
1451 INIT_LIST_HEAD(&data.dispose);
1452 data.start = dentry;
1455 d_walk(dentry, &data, check_and_collect, check_and_drop);
1458 if (!list_empty(&data.dispose))
1459 shrink_dentry_list(&data.dispose);
1470 EXPORT_SYMBOL(check_submounts_and_drop);
1473 * __d_alloc - allocate a dcache entry
1474 * @sb: filesystem it will belong to
1475 * @name: qstr of the name
1477 * Allocates a dentry. It returns %NULL if there is insufficient memory
1478 * available. On a success the dentry is returned. The name passed in is
1479 * copied and the copy passed in may be reused after this call.
1482 struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
1484 struct dentry *dentry;
1487 dentry = kmem_cache_alloc(dentry_cache, GFP_KERNEL);
1492 * We guarantee that the inline name is always NUL-terminated.
1493 * This way the memcpy() done by the name switching in rename
1494 * will still always have a NUL at the end, even if we might
1495 * be overwriting an internal NUL character
1497 dentry->d_iname[DNAME_INLINE_LEN-1] = 0;
1498 if (name->len > DNAME_INLINE_LEN-1) {
1499 dname = kmalloc(name->len + 1, GFP_KERNEL);
1501 kmem_cache_free(dentry_cache, dentry);
1505 dname = dentry->d_iname;
1508 dentry->d_name.len = name->len;
1509 dentry->d_name.hash = name->hash;
1510 memcpy(dname, name->name, name->len);
1511 dname[name->len] = 0;
1513 /* Make sure we always see the terminating NUL character */
1515 dentry->d_name.name = dname;
1517 dentry->d_lockref.count = 1;
1518 dentry->d_flags = 0;
1519 spin_lock_init(&dentry->d_lock);
1520 seqcount_init(&dentry->d_seq);
1521 dentry->d_inode = NULL;
1522 dentry->d_parent = dentry;
1524 dentry->d_op = NULL;
1525 dentry->d_fsdata = NULL;
1526 INIT_HLIST_BL_NODE(&dentry->d_hash);
1527 INIT_LIST_HEAD(&dentry->d_lru);
1528 INIT_LIST_HEAD(&dentry->d_subdirs);
1529 INIT_HLIST_NODE(&dentry->d_alias);
1530 INIT_LIST_HEAD(&dentry->d_u.d_child);
1531 d_set_d_op(dentry, dentry->d_sb->s_d_op);
1533 this_cpu_inc(nr_dentry);
1539 * d_alloc - allocate a dcache entry
1540 * @parent: parent of entry to allocate
1541 * @name: qstr of the name
1543 * Allocates a dentry. It returns %NULL if there is insufficient memory
1544 * available. On a success the dentry is returned. The name passed in is
1545 * copied and the copy passed in may be reused after this call.
1547 struct dentry *d_alloc(struct dentry * parent, const struct qstr *name)
1549 struct dentry *dentry = __d_alloc(parent->d_sb, name);
1553 spin_lock(&parent->d_lock);
1555 * don't need child lock because it is not subject
1556 * to concurrency here
1558 __dget_dlock(parent);
1559 dentry->d_parent = parent;
1560 list_add(&dentry->d_u.d_child, &parent->d_subdirs);
1561 spin_unlock(&parent->d_lock);
1565 EXPORT_SYMBOL(d_alloc);
1567 struct dentry *d_alloc_pseudo(struct super_block *sb, const struct qstr *name)
1569 struct dentry *dentry = __d_alloc(sb, name);
1571 dentry->d_flags |= DCACHE_DISCONNECTED;
1574 EXPORT_SYMBOL(d_alloc_pseudo);
1576 struct dentry *d_alloc_name(struct dentry *parent, const char *name)
1581 q.len = strlen(name);
1582 q.hash = full_name_hash(q.name, q.len);
1583 return d_alloc(parent, &q);
1585 EXPORT_SYMBOL(d_alloc_name);
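/*
 * Illustrative sketch (assumed caller): d_alloc_name() simply spares the
 * caller from building the qstr by hand, i.e. the two forms below are
 * equivalent ("example" is a placeholder name):
 *
 *	struct qstr q = QSTR_INIT("example", 7);
 *	q.hash = full_name_hash(q.name, q.len);
 *	dentry = d_alloc(parent, &q);
 *
 *	dentry = d_alloc_name(parent, "example");
 */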
1587 void d_set_d_op(struct dentry *dentry, const struct dentry_operations *op)
1589 WARN_ON_ONCE(dentry->d_op);
1590 WARN_ON_ONCE(dentry->d_flags & (DCACHE_OP_HASH |
1592 DCACHE_OP_REVALIDATE |
1593 DCACHE_OP_WEAK_REVALIDATE |
1594 DCACHE_OP_DELETE ));
1599 dentry->d_flags |= DCACHE_OP_HASH;
1601 dentry->d_flags |= DCACHE_OP_COMPARE;
1602 if (op->d_revalidate)
1603 dentry->d_flags |= DCACHE_OP_REVALIDATE;
1604 if (op->d_weak_revalidate)
1605 dentry->d_flags |= DCACHE_OP_WEAK_REVALIDATE;
1607 dentry->d_flags |= DCACHE_OP_DELETE;
1609 dentry->d_flags |= DCACHE_OP_PRUNE;
1612 EXPORT_SYMBOL(d_set_d_op);
1614 static void __d_instantiate(struct dentry *dentry, struct inode *inode)
1616 spin_lock(&dentry->d_lock);
1618 if (unlikely(IS_AUTOMOUNT(inode)))
1619 dentry->d_flags |= DCACHE_NEED_AUTOMOUNT;
1620 hlist_add_head(&dentry->d_alias, &inode->i_dentry);
1622 dentry->d_inode = inode;
1623 dentry_rcuwalk_barrier(dentry);
1624 spin_unlock(&dentry->d_lock);
1625 fsnotify_d_instantiate(dentry, inode);
1629 * d_instantiate - fill in inode information for a dentry
1630 * @entry: dentry to complete
1631 * @inode: inode to attach to this dentry
1633 * Fill in inode information in the entry.
1635 * This turns negative dentries into productive full members
1638 * NOTE! This assumes that the inode count has been incremented
1639 * (or otherwise set) by the caller to indicate that it is now
1640 * in use by the dcache.
1643 void d_instantiate(struct dentry *entry, struct inode * inode)
1645 BUG_ON(!hlist_unhashed(&entry->d_alias));
1647 spin_lock(&inode->i_lock);
1648 __d_instantiate(entry, inode);
1650 spin_unlock(&inode->i_lock);
1651 security_d_instantiate(entry, inode);
1653 EXPORT_SYMBOL(d_instantiate);
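/*
 * Illustrative sketch (assumed caller): a typical ->create() implementation
 * allocates and fills a new inode, then attaches it to the dentry the VFS
 * passed in:
 *
 *	static int examplefs_create(struct inode *dir, struct dentry *dentry,
 *				    umode_t mode, bool excl)
 *	{
 *		struct inode *inode = examplefs_new_inode(dir, mode);
 *
 *		if (IS_ERR(inode))
 *			return PTR_ERR(inode);
 *		d_instantiate(dentry, inode);	// dcache now owns the inode ref
 *		return 0;
 *	}
 *
 * examplefs_new_inode() is a hypothetical helper returning an inode with an
 * elevated reference count, matching the NOTE above.
 */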
1656 * d_instantiate_unique - instantiate a non-aliased dentry
1657 * @entry: dentry to instantiate
1658 * @inode: inode to attach to this dentry
1660 * Fill in inode information in the entry. On success, it returns NULL.
1661 * If an unhashed alias of "entry" already exists, then we return the
1662 * aliased dentry instead and drop one reference to inode.
1664 * Note that in order to avoid conflicts with rename() etc, the caller
1665 * had better be holding the parent directory semaphore.
1667 * This also assumes that the inode count has been incremented
1668 * (or otherwise set) by the caller to indicate that it is now
1669 * in use by the dcache.
1671 static struct dentry *__d_instantiate_unique(struct dentry *entry,
1672 struct inode *inode)
1674 struct dentry *alias;
1675 int len = entry->d_name.len;
1676 const char *name = entry->d_name.name;
1677 unsigned int hash = entry->d_name.hash;
1680 __d_instantiate(entry, NULL);
1684 hlist_for_each_entry(alias, &inode->i_dentry, d_alias) {
1686 * Don't need alias->d_lock here, because aliases with
1687 * d_parent == entry->d_parent are not subject to name or
1688 * parent changes, because the parent inode i_mutex is held.
1690 if (alias->d_name.hash != hash)
1692 if (alias->d_parent != entry->d_parent)
1694 if (alias->d_name.len != len)
1696 if (dentry_cmp(alias, name, len))
1702 __d_instantiate(entry, inode);
1706 struct dentry *d_instantiate_unique(struct dentry *entry, struct inode *inode)
1708 struct dentry *result;
1710 BUG_ON(!hlist_unhashed(&entry->d_alias));
1713 spin_lock(&inode->i_lock);
1714 result = __d_instantiate_unique(entry, inode);
1716 spin_unlock(&inode->i_lock);
1719 security_d_instantiate(entry, inode);
1723 BUG_ON(!d_unhashed(result));
1728 EXPORT_SYMBOL(d_instantiate_unique);
1730 struct dentry *d_make_root(struct inode *root_inode)
1732 struct dentry *res = NULL;
1735 static const struct qstr name = QSTR_INIT("/", 1);
1737 res = __d_alloc(root_inode->i_sb, &name);
1739 d_instantiate(res, root_inode);
1745 EXPORT_SYMBOL(d_make_root);
1747 static struct dentry * __d_find_any_alias(struct inode *inode)
1749 struct dentry *alias;
1751 if (hlist_empty(&inode->i_dentry))
1753 alias = hlist_entry(inode->i_dentry.first, struct dentry, d_alias);
1759 * d_find_any_alias - find any alias for a given inode
1760 * @inode: inode to find an alias for
1762 * If any aliases exist for the given inode, take and return a
1763 * reference for one of them. If no aliases exist, return %NULL.
1765 struct dentry *d_find_any_alias(struct inode *inode)
1769 spin_lock(&inode->i_lock);
1770 de = __d_find_any_alias(inode);
1771 spin_unlock(&inode->i_lock);
1774 EXPORT_SYMBOL(d_find_any_alias);
1777 * d_obtain_alias - find or allocate a dentry for a given inode
1778 * @inode: inode to allocate the dentry for
1780 * Obtain a dentry for an inode resulting from NFS filehandle conversion or
1781 * similar open by handle operations. The returned dentry may be anonymous,
1782 * or may have a full name (if the inode was already in the cache).
1784 * When called on a directory inode, we must ensure that the inode only ever
1785 * has one dentry. If a dentry is found, that is returned instead of
1786 * allocating a new one.
1788 * On successful return, the reference to the inode has been transferred
1789 * to the dentry. In case of an error the reference on the inode is released.
1790 * To make it easier to use in export operations a %NULL or IS_ERR inode may
1791 * be passed in, and the error will be propagated to the return value,
1792 * with a %NULL @inode replaced by ERR_PTR(-ESTALE).
1794 struct dentry *d_obtain_alias(struct inode *inode)
1796 static const struct qstr anonstring = QSTR_INIT("/", 1);
1801 return ERR_PTR(-ESTALE);
1803 return ERR_CAST(inode);
1805 res = d_find_any_alias(inode);
1809 tmp = __d_alloc(inode->i_sb, &anonstring);
1811 res = ERR_PTR(-ENOMEM);
1815 spin_lock(&inode->i_lock);
1816 res = __d_find_any_alias(inode);
1818 spin_unlock(&inode->i_lock);
1823 /* attach a disconnected dentry */
1824 spin_lock(&tmp->d_lock);
1825 tmp->d_inode = inode;
1826 tmp->d_flags |= DCACHE_DISCONNECTED;
1827 hlist_add_head(&tmp->d_alias, &inode->i_dentry);
1828 hlist_bl_lock(&tmp->d_sb->s_anon);
1829 hlist_bl_add_head(&tmp->d_hash, &tmp->d_sb->s_anon);
1830 hlist_bl_unlock(&tmp->d_sb->s_anon);
1831 spin_unlock(&tmp->d_lock);
1832 spin_unlock(&inode->i_lock);
1833 security_d_instantiate(tmp, inode);
1838 if (res && !IS_ERR(res))
1839 security_d_instantiate(res, inode);
1843 EXPORT_SYMBOL(d_obtain_alias);
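/*
 * Illustrative sketch (assumed caller): an exportfs ->fh_to_dentry()
 * implementation usually decodes the file handle into an inode and hands it
 * straight to d_obtain_alias(), which also absorbs NULL and ERR_PTR inodes
 * as documented above:
 *
 *	static struct dentry *examplefs_fh_to_dentry(struct super_block *sb,
 *						     struct fid *fid,
 *						     int fh_len, int fh_type)
 *	{
 *		struct inode *inode = examplefs_iget(sb, fid->i32.ino);
 *
 *		return d_obtain_alias(inode);
 *	}
 *
 * examplefs_iget() is a hypothetical lookup returning a referenced inode or
 * an ERR_PTR on failure.
 */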
1846 * d_splice_alias - splice a disconnected dentry into the tree if one exists
1847 * @inode: the inode which may have a disconnected dentry
1848 * @dentry: a negative dentry which we want to point to the inode.
1850 * If inode is a directory and has a 'disconnected' dentry (i.e. IS_ROOT and
1851 * DCACHE_DISCONNECTED), then d_move that in place of the given dentry
1852 * and return it, else simply d_add the inode to the dentry and return NULL.
1854 * This is needed in the lookup routine of any filesystem that is exportable
1855 * (via knfsd) so that we can build dcache paths to directories effectively.
1857 * If a dentry was found and moved, then it is returned. Otherwise NULL
1858 * is returned. This matches the expected return value of ->lookup.
1860 * Cluster filesystems may call this function with a negative, hashed dentry.
1861 * In that case, we know that the inode will be a regular file, and also this
1862 * will only occur during atomic_open. So we need to check for the dentry
1863 * being already hashed only in the final case.
1865 struct dentry *d_splice_alias(struct inode *inode, struct dentry *dentry)
1867 struct dentry *new = NULL;
1870 return ERR_CAST(inode);
1872 if (inode && S_ISDIR(inode->i_mode)) {
1873 spin_lock(&inode->i_lock);
1874 new = __d_find_alias(inode, 1);
1876 BUG_ON(!(new->d_flags & DCACHE_DISCONNECTED));
1877 spin_unlock(&inode->i_lock);
1878 security_d_instantiate(new, inode);
1879 d_move(new, dentry);
1882 /* already taking inode->i_lock, so d_add() by hand */
1883 __d_instantiate(dentry, inode);
1884 spin_unlock(&inode->i_lock);
1885 security_d_instantiate(dentry, inode);
1889 d_instantiate(dentry, inode);
1890 if (d_unhashed(dentry))
1895 EXPORT_SYMBOL(d_splice_alias);
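/*
 * Illustrative sketch (assumed caller): an exportable filesystem's ->lookup()
 * typically resolves the name to an inode (or NULL for a negative entry) and
 * lets d_splice_alias() decide between reusing a disconnected alias and a
 * plain d_add():
 *
 *	static struct dentry *examplefs_lookup(struct inode *dir,
 *					       struct dentry *dentry,
 *					       unsigned int flags)
 *	{
 *		struct inode *inode = examplefs_find_inode(dir, &dentry->d_name);
 *
 *		return d_splice_alias(inode, dentry);	// NULL inode => negative dentry
 *	}
 *
 * examplefs_find_inode() is a hypothetical helper.
 */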
1898 * d_add_ci - lookup or allocate new dentry with case-exact name
1899 * @inode: the inode case-insensitive lookup has found
1900 * @dentry: the negative dentry that was passed to the parent's lookup func
1901 * @name: the case-exact name to be associated with the returned dentry
1903 * This is to avoid filling the dcache with case-insensitive names to the
1904 * same inode, only the actual correct case is stored in the dcache for
1905 * case-insensitive filesystems.
1907 * For a case-insensitive lookup match, if the case-exact dentry
1908 * already exists in the dcache, use it and return it.
1910 * If no entry exists with the exact case name, allocate new dentry with
1911 * the exact case, and return the spliced entry.
1913 struct dentry *d_add_ci(struct dentry *dentry, struct inode *inode,
1916 struct dentry *found;
1920 * First check if a dentry matching the name already exists,
1921 * if not go ahead and create it now.
1923 found = d_hash_and_lookup(dentry->d_parent, name);
1924 if (unlikely(IS_ERR(found)))
1927 new = d_alloc(dentry->d_parent, name);
1929 found = ERR_PTR(-ENOMEM);
1933 found = d_splice_alias(inode, new);
1942 * If a matching dentry exists, and it's not negative use it.
1944 * Decrement the reference count to balance the iget() done
1947 if (found->d_inode) {
1948 if (unlikely(found->d_inode != inode)) {
1949 /* This can't happen because bad inodes are unhashed. */
1950 BUG_ON(!is_bad_inode(inode));
1951 BUG_ON(!is_bad_inode(found->d_inode));
1958 * Negative dentry: instantiate it unless the inode is a directory and
1959 * already has a dentry.
1961 new = d_splice_alias(inode, found);
1972 EXPORT_SYMBOL(d_add_ci);
1975 * Do the slow-case of the dentry name compare.
1977 * Unlike the dentry_cmp() function, we need to atomically
1978 * load the name and length information, so that the
1979 * filesystem can rely on them, and can use the 'name' and
1980 * 'len' information without worrying about walking off the
1981 * end of memory etc.
1983 * Thus the read_seqcount_retry() and the "duplicate" info
1984 * in arguments (the low-level filesystem should not look
1985 * at the dentry inode or name contents directly, since
1986 * rename can change them while we're in RCU mode).
1988 enum slow_d_compare {
1994 static noinline enum slow_d_compare slow_dentry_cmp(
1995 const struct dentry *parent,
1996 struct dentry *dentry,
1998 const struct qstr *name)
2000 int tlen = dentry->d_name.len;
2001 const char *tname = dentry->d_name.name;
2003 if (read_seqcount_retry(&dentry->d_seq, seq)) {
2005 return D_COMP_SEQRETRY;
2007 if (parent->d_op->d_compare(parent, dentry, tlen, tname, name))
2008 return D_COMP_NOMATCH;
2013 * __d_lookup_rcu - search for a dentry (racy, store-free)
2014 * @parent: parent dentry
2015 * @name: qstr of name we wish to find
2016 * @seqp: returns d_seq value at the point where the dentry was found
2017 * Returns: dentry, or NULL
2019 * __d_lookup_rcu is the dcache lookup function for rcu-walk name
2020 * resolution (store-free path walking) design described in
2021 * Documentation/filesystems/path-lookup.txt.
2023 * This is not to be used outside core vfs.
2025 * __d_lookup_rcu must only be used in rcu-walk mode, ie. with vfsmount lock
2026 * held, and rcu_read_lock held. The returned dentry must not be stored into
2027 * without taking d_lock and checking d_seq sequence count against @seq
2030 * A refcount may be taken on the found dentry with the d_rcu_to_refcount
2033 * Alternatively, __d_lookup_rcu may be called again to look up the child of
2034 * the returned dentry, so long as its parent's seqlock is checked after the
2035 * child is looked up. Thus, an interlocking stepping of sequence lock checks
2036 * is formed, giving integrity down the path walk.
2038 * NOTE! The caller *has* to check the resulting dentry against the sequence
2039 * number we've returned before using any of the resulting dentry state!
2041 struct dentry *__d_lookup_rcu(const struct dentry *parent,
2042 const struct qstr *name,
2045 u64 hashlen = name->hash_len;
2046 const unsigned char *str = name->name;
2047 struct hlist_bl_head *b = d_hash(parent, hashlen_hash(hashlen));
2048 struct hlist_bl_node *node;
2049 struct dentry *dentry;
2052 * Note: There is significant duplication with __d_lookup which is
2053 * required to prevent single threaded performance regressions
2054 * especially on architectures where smp_rmb (in seqcounts) are costly.
2055 * Keep the two functions in sync.
2059 * The hash list is protected using RCU.
2061 * Carefully use d_seq when comparing a candidate dentry, to avoid
2062 * races with d_move().
2064 * It is possible that concurrent renames can mess up our list
2065 * walk here and result in missing our dentry, resulting in the
2066 * false-negative result. d_lookup() protects against concurrent
2067 * renames using rename_lock seqlock.
2069 * See Documentation/filesystems/path-lookup.txt for more details.
2071 hlist_bl_for_each_entry_rcu(dentry, node, b, d_hash) {
2076 * The dentry sequence count protects us from concurrent
2077 * renames, and thus protects parent and name fields.
2079 * The caller must perform a seqcount check in order
2080 * to do anything useful with the returned dentry.
2082 * NOTE! We do a "raw" seqcount_begin here. That means that
2083 * we don't wait for the sequence count to stabilize if it
2084 * is in the middle of a sequence change. If we do the slow
2085 * dentry compare, we will do seqretries until it is stable,
2086 * and if we end up with a successful lookup, we actually
2087 * want to exit RCU lookup anyway.
2089 seq = raw_seqcount_begin(&dentry->d_seq);
2090 if (dentry->d_parent != parent)
2092 if (d_unhashed(dentry))
2095 if (unlikely(parent->d_flags & DCACHE_OP_COMPARE)) {
2096 if (dentry->d_name.hash != hashlen_hash(hashlen))
2099 switch (slow_dentry_cmp(parent, dentry, seq, name)) {
2102 case D_COMP_NOMATCH:
2109 if (dentry->d_name.hash_len != hashlen)
2112 if (!dentry_cmp(dentry, str, hashlen_len(hashlen)))
2119 * d_lookup - search for a dentry
2120 * @parent: parent dentry
2121 * @name: qstr of name we wish to find
2122 * Returns: dentry, or NULL
2124 * d_lookup searches the children of the parent dentry for the name in
2125 * question. If the dentry is found its reference count is incremented and the
2126 * dentry is returned. The caller must use dput to free the entry when it has
2127 * finished using it. %NULL is returned if the dentry does not exist.
2129 struct dentry *d_lookup(const struct dentry *parent, const struct qstr *name)
2131 struct dentry *dentry;
2135 seq = read_seqbegin(&rename_lock);
2136 dentry = __d_lookup(parent, name);
2139 } while (read_seqretry(&rename_lock, seq));
2142 EXPORT_SYMBOL(d_lookup);
2145 * __d_lookup - search for a dentry (racy)
2146 * @parent: parent dentry
2147 * @name: qstr of name we wish to find
2148 * Returns: dentry, or NULL
2150 * __d_lookup is like d_lookup, however it may (rarely) return a
2151 * false-negative result due to unrelated rename activity.
2153 * __d_lookup is slightly faster by avoiding rename_lock read seqlock,
2154 * however it must be used carefully, eg. with a following d_lookup in
2155 * the case of failure.
2157 * __d_lookup callers must be commented.
2159 struct dentry *__d_lookup(const struct dentry *parent, const struct qstr *name)
2161 unsigned int len = name->len;
2162 unsigned int hash = name->hash;
2163 const unsigned char *str = name->name;
2164 struct hlist_bl_head *b = d_hash(parent, hash);
2165 struct hlist_bl_node *node;
2166 struct dentry *found = NULL;
2167 struct dentry *dentry;
2170 * Note: There is significant duplication with __d_lookup_rcu which is
2171 * required to prevent single threaded performance regressions
2172 * especially on architectures where smp_rmb (in seqcounts) are costly.
2173 * Keep the two functions in sync.
2177 * The hash list is protected using RCU.
2179 * Take d_lock when comparing a candidate dentry, to avoid races
2182 * It is possible that concurrent renames can mess up our list
2183 * walk here and result in missing our dentry, resulting in the
2184 * false-negative result. d_lookup() protects against concurrent
2185 * renames using rename_lock seqlock.
2187 * See Documentation/filesystems/path-lookup.txt for more details.
2191 hlist_bl_for_each_entry_rcu(dentry, node, b, d_hash) {
2193 if (dentry->d_name.hash != hash)
2196 spin_lock(&dentry->d_lock);
2197 if (dentry->d_parent != parent)
2199 if (d_unhashed(dentry))
2203 * It is safe to compare names since d_move() cannot
2204 * change the qstr (protected by d_lock).
2206 if (parent->d_flags & DCACHE_OP_COMPARE) {
2207 int tlen = dentry->d_name.len;
2208 const char *tname = dentry->d_name.name;
2209 if (parent->d_op->d_compare(parent, dentry, tlen, tname, name))
2212 if (dentry->d_name.len != len)
2214 if (dentry_cmp(dentry, str, len))
2218 dentry->d_lockref.count++;
2220 spin_unlock(&dentry->d_lock);
2223 spin_unlock(&dentry->d_lock);
2231 * d_hash_and_lookup - hash the qstr then search for a dentry
2232 * @dir: Directory to search in
2233 * @name: qstr of name we wish to find
2235 * On lookup failure NULL is returned; on bad name - ERR_PTR(-error)
2237 struct dentry *d_hash_and_lookup(struct dentry *dir, struct qstr *name)
2240 * Check for a fs-specific hash function. Note that we must
2241 * calculate the standard hash first, as the d_op->d_hash()
2242 * routine may choose to leave the hash value unchanged.
2244 name->hash = full_name_hash(name->name, name->len);
2245 if (dir->d_flags & DCACHE_OP_HASH) {
2246 int err = dir->d_op->d_hash(dir, name);
2247 if (unlikely(err < 0))
2248 return ERR_PTR(err);
2250 return d_lookup(dir, name);
2252 EXPORT_SYMBOL(d_hash_and_lookup);
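/*
 * Illustrative sketch (assumed caller): code that wants a cached child by
 * name while respecting any ->d_hash() the directory provides can call
 * d_hash_and_lookup() directly ("status" is a placeholder name):
 *
 *	struct qstr name = QSTR_INIT("status", 6);
 *	struct dentry *child = d_hash_and_lookup(dir, &name);
 *
 *	if (IS_ERR(child))
 *		return PTR_ERR(child);	// ->d_hash() rejected the name
 *	if (child) {
 *		... use the reference ...
 *		dput(child);
 *	}
 */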
2255 * d_validate - verify dentry provided from insecure source (deprecated)
2256 * @dentry: The dentry alleged to be valid child of @dparent
2257 * @dparent: The parent dentry (known to be valid)
2259 * An insecure source has sent us a dentry, here we verify it and dget() it.
2260 * This is used by ncpfs in its readdir implementation.
2261 * Zero is returned if the dentry is invalid.
2263 * This function is slow for big directories, and deprecated, do not use it.
2265 int d_validate(struct dentry *dentry, struct dentry *dparent)
2267 struct dentry *child;
2269 spin_lock(&dparent->d_lock);
2270 list_for_each_entry(child, &dparent->d_subdirs, d_u.d_child) {
2271 if (dentry == child) {
2272 spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
2273 __dget_dlock(dentry);
2274 spin_unlock(&dentry->d_lock);
2275 spin_unlock(&dparent->d_lock);
2279 spin_unlock(&dparent->d_lock);
2283 EXPORT_SYMBOL(d_validate);
2286 * When a file is deleted, we have two options:
2287 * - turn this dentry into a negative dentry
2288 * - unhash this dentry and free it.
2290 * Usually, we want to just turn this into
2291 * a negative dentry, but if anybody else is
2292 * currently using the dentry or the inode
2293 * we can't do that and we fall back on removing
2294 * it from the hash queues and waiting for
2295 * it to be deleted later when it has no users
2299 * d_delete - delete a dentry
2300 * @dentry: The dentry to delete
2302 * Turn the dentry into a negative dentry if possible, otherwise
2303 * remove it from the hash queues so it can be deleted later
2306 void d_delete(struct dentry * dentry)
2308 struct inode *inode;
2311 * Are we the only user?
2314 spin_lock(&dentry->d_lock);
2315 inode = dentry->d_inode;
2316 isdir = S_ISDIR(inode->i_mode);
2317 if (dentry->d_lockref.count == 1) {
2318 if (!spin_trylock(&inode->i_lock)) {
2319 spin_unlock(&dentry->d_lock);
2323 dentry->d_flags &= ~DCACHE_CANT_MOUNT;
2324 dentry_unlink_inode(dentry);
2325 fsnotify_nameremove(dentry, isdir);
2329 if (!d_unhashed(dentry))
2332 spin_unlock(&dentry->d_lock);
2334 fsnotify_nameremove(dentry, isdir);
2336 EXPORT_SYMBOL(d_delete);
2338 static void __d_rehash(struct dentry * entry, struct hlist_bl_head *b)
2340 BUG_ON(!d_unhashed(entry));
2342 entry->d_flags |= DCACHE_RCUACCESS;
2343 hlist_bl_add_head_rcu(&entry->d_hash, b);
2347 static void _d_rehash(struct dentry * entry)
2349 __d_rehash(entry, d_hash(entry->d_parent, entry->d_name.hash));
2353 * d_rehash - add an entry back to the hash
2354 * @entry: dentry to add to the hash
2356 * Adds a dentry to the hash according to its name.
2359 void d_rehash(struct dentry * entry)
2361 spin_lock(&entry->d_lock);
2363 spin_unlock(&entry->d_lock);
2365 EXPORT_SYMBOL(d_rehash);
2368 * dentry_update_name_case - update case insensitive dentry with a new name
2369 * @dentry: dentry to be updated
2372 * Update a case-insensitive dentry with the new case of its name.
2374 * dentry must have been returned by d_lookup with name @name. Old and new
2375 * name lengths must match (i.e. no d_compare which allows mismatched name lengths).
2378 * Parent inode i_mutex must be held over d_lookup and into this call (to
2379 * keep renames and concurrent inserts, and readdir(2) away).
2381 void dentry_update_name_case(struct dentry *dentry, struct qstr *name)
2383 BUG_ON(!mutex_is_locked(&dentry->d_parent->d_inode->i_mutex));
2384 BUG_ON(dentry->d_name.len != name->len); /* d_lookup gives this */
2386 spin_lock(&dentry->d_lock);
2387 write_seqcount_begin(&dentry->d_seq);
2388 memcpy((unsigned char *)dentry->d_name.name, name->name, name->len);
2389 write_seqcount_end(&dentry->d_seq);
2390 spin_unlock(&dentry->d_lock);
2392 EXPORT_SYMBOL(dentry_update_name_case);
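/*
 * Illustrative sketch, not part of dcache.c: how a case-insensitive
 * filesystem might fix up the cached spelling after discovering that the
 * on-disk name differs only in case.  Hypothetical helper; as required
 * above, the parent's i_mutex is assumed held from the d_lookup() that
 * produced @dentry through this call.
 */
static void example_fixup_name_case(struct dentry *dentry,
				    const char *name_on_disk)
{
	struct qstr q = QSTR_INIT(name_on_disk, dentry->d_name.len);

	dentry_update_name_case(dentry, &q);
}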
2394 static void switch_names(struct dentry *dentry, struct dentry *target)
2396 if (dname_external(target)) {
2397 if (dname_external(dentry)) {
2399 * Both external: swap the pointers
2401 swap(target->d_name.name, dentry->d_name.name);
2404 * dentry:internal, target:external. Steal target's
2405 * storage and make target internal.
2407 memcpy(target->d_iname, dentry->d_name.name,
2408 dentry->d_name.len + 1);
2409 dentry->d_name.name = target->d_name.name;
2410 target->d_name.name = target->d_iname;
2413 if (dname_external(dentry)) {
2415 * dentry:external, target:internal. Give dentry's
2416 * storage to target and make dentry internal
2418 memcpy(dentry->d_iname, target->d_name.name,
2419 target->d_name.len + 1);
2420 target->d_name.name = dentry->d_name.name;
2421 dentry->d_name.name = dentry->d_iname;
2424 * Both are internal. Just copy target to dentry
2426 memcpy(dentry->d_iname, target->d_name.name,
2427 target->d_name.len + 1);
2428 dentry->d_name.len = target->d_name.len;
2432 swap(dentry->d_name.len, target->d_name.len);
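/*
 * Illustrative note, not part of dcache.c: "external" above means the name
 * did not fit in the dentry's embedded d_iname[] array and lives in
 * separately allocated storage; dname_external() is essentially:
 */
static inline int example_dname_external(const struct dentry *dentry)
{
	return dentry->d_name.name != dentry->d_iname;
}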
2435 static void dentry_lock_for_move(struct dentry *dentry, struct dentry *target)
2438 * XXXX: do we really need to take target->d_lock?
2440 if (IS_ROOT(dentry) || dentry->d_parent == target->d_parent)
2441 spin_lock(&target->d_parent->d_lock);
2443 if (d_ancestor(dentry->d_parent, target->d_parent)) {
2444 spin_lock(&dentry->d_parent->d_lock);
2445 spin_lock_nested(&target->d_parent->d_lock,
2446 DENTRY_D_LOCK_NESTED);
2448 spin_lock(&target->d_parent->d_lock);
2449 spin_lock_nested(&dentry->d_parent->d_lock,
2450 DENTRY_D_LOCK_NESTED);
2453 if (target < dentry) {
2454 spin_lock_nested(&target->d_lock, 2);
2455 spin_lock_nested(&dentry->d_lock, 3);
2457 spin_lock_nested(&dentry->d_lock, 2);
2458 spin_lock_nested(&target->d_lock, 3);
2462 static void dentry_unlock_parents_for_move(struct dentry *dentry,
2463 struct dentry *target)
2465 if (target->d_parent != dentry->d_parent)
2466 spin_unlock(&dentry->d_parent->d_lock);
2467 if (target->d_parent != target)
2468 spin_unlock(&target->d_parent->d_lock);
2472 * When switching names, the actual string doesn't strictly have to
2473 * be preserved in the target - because we're dropping the target
2474 * anyway. As such, we can just do a simple memcpy() to copy over
2475 * the new name before we switch.
2477 * Note that we have to be a lot more careful about getting the hash
2478 * switched - we have to switch the hash value properly even if it
2479 * then no longer matches the actual (corrupted) string of the target.
2480 * The hash value has to match the hash queue that the dentry is on..
2483 * __d_move - move a dentry
2484 * @dentry: entry to move
2485 * @target: new dentry
2487 * Update the dcache to reflect the move of a file name. Negative
2488 * dcache entries should not be moved in this way. Caller must hold
2489 * rename_lock, the i_mutex of the source and target directories,
2490 * and the sb->s_vfs_rename_mutex if they differ. See lock_rename().
2492 static void __d_move(struct dentry * dentry, struct dentry * target)
2494 if (!dentry->d_inode)
2495 printk(KERN_WARNING "VFS: moving negative dcache entry\n");
2497 BUG_ON(d_ancestor(dentry, target));
2498 BUG_ON(d_ancestor(target, dentry));
2500 dentry_lock_for_move(dentry, target);
2502 write_seqcount_begin(&dentry->d_seq);
2503 write_seqcount_begin(&target->d_seq);
2505 /* __d_drop does write_seqcount_barrier, but they're OK to nest. */
2508 * Move the dentry to the target hash queue. Don't bother checking
2509 * for the same hash queue because of how unlikely it is.
2512 __d_rehash(dentry, d_hash(target->d_parent, target->d_name.hash));
2514 /* Unhash the target: dput() will then get rid of it */
2517 list_del(&dentry->d_u.d_child);
2518 list_del(&target->d_u.d_child);
2520 /* Switch the names.. */
2521 switch_names(dentry, target);
2522 swap(dentry->d_name.hash, target->d_name.hash);
2524 /* ... and switch the parents */
2525 if (IS_ROOT(dentry)) {
2526 dentry->d_parent = target->d_parent;
2527 target->d_parent = target;
2528 INIT_LIST_HEAD(&target->d_u.d_child);
2530 swap(dentry->d_parent, target->d_parent);
2532 /* And add them back to the (new) parent lists */
2533 list_add(&target->d_u.d_child, &target->d_parent->d_subdirs);
2536 list_add(&dentry->d_u.d_child, &dentry->d_parent->d_subdirs);
2538 write_seqcount_end(&target->d_seq);
2539 write_seqcount_end(&dentry->d_seq);
2541 dentry_unlock_parents_for_move(dentry, target);
2542 spin_unlock(&target->d_lock);
2543 fsnotify_d_move(dentry);
2544 spin_unlock(&dentry->d_lock);
2548 * d_move - move a dentry
2549 * @dentry: entry to move
2550 * @target: new dentry
2552 * Update the dcache to reflect the move of a file name. Negative
2553 * dcache entries should not be moved in this way. See the locking
2554 * requirements for __d_move.
2556 void d_move(struct dentry *dentry, struct dentry *target)
2558 write_seqlock(&rename_lock);
2559 __d_move(dentry, target);
2560 write_sequnlock(&rename_lock);
2562 EXPORT_SYMBOL(d_move);
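/*
 * Illustrative sketch, not part of dcache.c: the shape of a cross-directory
 * rename in the VFS.  lock_rename() takes s_vfs_rename_mutex (when the
 * parents differ) plus both parents' i_mutex, which is the locking
 * __d_move() asks its callers for; d_move() itself takes rename_lock.
 * @old_dir and @new_dir are the parent directory dentries.
 */
static int example_rename(struct dentry *old_dir, struct dentry *old_dentry,
			  struct dentry *new_dir, struct dentry *new_dentry)
{
	struct dentry *trap;
	int error = -EINVAL;

	trap = lock_rename(new_dir, old_dir);
	if (trap != old_dentry && trap != new_dentry) {
		error = old_dir->d_inode->i_op->rename(old_dir->d_inode,
						       old_dentry,
						       new_dir->d_inode,
						       new_dentry);
		if (!error)
			d_move(old_dentry, new_dentry);
	}
	unlock_rename(new_dir, old_dir);
	return error;
}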
2565 * d_ancestor - search for an ancestor
2566 * @p1: ancestor dentry
2569 * Returns the ancestor dentry of p2 which is a child of p1, if p1 is
2570 * an ancestor of p2, else NULL.
2572 struct dentry *d_ancestor(struct dentry *p1, struct dentry *p2)
2576 for (p = p2; !IS_ROOT(p); p = p->d_parent) {
2577 if (p->d_parent == p1)
2584 * This helper attempts to cope with remotely renamed directories
2586 * It assumes that the caller is already holding
2587 * dentry->d_parent->d_inode->i_mutex, inode->i_lock and rename_lock
2589 * Note: If ever the locking in lock_rename() changes, then please
2590 * remember to update this too...
2592 static struct dentry *__d_unalias(struct inode *inode,
2593 struct dentry *dentry, struct dentry *alias)
2595 struct mutex *m1 = NULL, *m2 = NULL;
2596 struct dentry *ret = ERR_PTR(-EBUSY);
2598 /* If alias and dentry share a parent, then no extra locks required */
2599 if (alias->d_parent == dentry->d_parent)
2602 /* See lock_rename() */
2603 if (!mutex_trylock(&dentry->d_sb->s_vfs_rename_mutex))
2605 m1 = &dentry->d_sb->s_vfs_rename_mutex;
2606 if (!mutex_trylock(&alias->d_parent->d_inode->i_mutex))
2608 m2 = &alias->d_parent->d_inode->i_mutex;
2610 if (likely(!d_mountpoint(alias))) {
2611 __d_move(alias, dentry);
2615 spin_unlock(&inode->i_lock);
2624 * Prepare an anonymous dentry for life in the superblock's dentry tree as a
2625 * named dentry in place of the dentry to be replaced.
2626 * Returns with anon->d_lock held!
2628 static void __d_materialise_dentry(struct dentry *dentry, struct dentry *anon)
2630 struct dentry *dparent;
2632 dentry_lock_for_move(anon, dentry);
2634 write_seqcount_begin(&dentry->d_seq);
2635 write_seqcount_begin(&anon->d_seq);
2637 dparent = dentry->d_parent;
2639 switch_names(dentry, anon);
2640 swap(dentry->d_name.hash, anon->d_name.hash);
2642 dentry->d_parent = dentry;
2643 list_del_init(&dentry->d_u.d_child);
2644 anon->d_parent = dparent;
2645 list_move(&anon->d_u.d_child, &dparent->d_subdirs);
2647 write_seqcount_end(&dentry->d_seq);
2648 write_seqcount_end(&anon->d_seq);
2650 dentry_unlock_parents_for_move(anon, dentry);
2651 spin_unlock(&dentry->d_lock);
2653 /* anon->d_lock still locked, returns locked */
2654 anon->d_flags &= ~DCACHE_DISCONNECTED;
2658 * d_materialise_unique - introduce an inode into the tree
2659 * @dentry: candidate dentry
2660 * @inode: inode to bind to the dentry, to which aliases may be attached
2662 * Introduces a dentry into the tree, substituting an extant disconnected
2663 * root directory alias in its place if there is one. Caller must hold the
2664 * i_mutex of the parent directory.
2666 struct dentry *d_materialise_unique(struct dentry *dentry, struct inode *inode)
2668 struct dentry *actual;
2670 BUG_ON(!d_unhashed(dentry));
2674 __d_instantiate(dentry, NULL);
2679 spin_lock(&inode->i_lock);
2681 if (S_ISDIR(inode->i_mode)) {
2682 struct dentry *alias;
2684 /* Does an aliased dentry already exist? */
2685 alias = __d_find_alias(inode, 0);
2688 write_seqlock(&rename_lock);
2690 if (d_ancestor(alias, dentry)) {
2691 /* Check for loops */
2692 actual = ERR_PTR(-ELOOP);
2693 spin_unlock(&inode->i_lock);
2694 } else if (IS_ROOT(alias)) {
2695 /* Is this an anonymous mountpoint that we
2696 * could splice into our tree? */
2697 __d_materialise_dentry(dentry, alias);
2698 write_sequnlock(&rename_lock);
2702 /* Nope, but we must(!) avoid directory
2703 * aliasing. This drops inode->i_lock */
2704 actual = __d_unalias(inode, dentry, alias);
2706 write_sequnlock(&rename_lock);
2707 if (IS_ERR(actual)) {
2708 if (PTR_ERR(actual) == -ELOOP)
2709 pr_warn_ratelimited(
2710 "VFS: Lookup of '%s' in %s %s"
2711 " would have caused loop\n",
2712 dentry->d_name.name,
2713 inode->i_sb->s_type->name,
2721 /* Add a unique reference */
2722 actual = __d_instantiate_unique(dentry, inode);
2726 BUG_ON(!d_unhashed(actual));
2728 spin_lock(&actual->d_lock);
2731 spin_unlock(&actual->d_lock);
2732 spin_unlock(&inode->i_lock);
2734 if (actual == dentry) {
2735 security_d_instantiate(dentry, inode);
2742 EXPORT_SYMBOL_GPL(d_materialise_unique);
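/*
 * Illustrative sketch, not part of dcache.c: how a network filesystem's
 * ->lookup() might use d_materialise_unique() to splice a possibly
 * already-aliased directory inode under the candidate dentry.  The inode
 * lookup helper is hypothetical; the parent's i_mutex is held by the VFS
 * across ->lookup(), as required above.
 */
static struct dentry *example_lookup(struct inode *dir, struct dentry *dentry,
				     unsigned int flags)
{
	struct inode *inode = example_fs_iget(dir, &dentry->d_name);

	if (IS_ERR(inode))
		return ERR_CAST(inode);
	/* NULL means @dentry was used; otherwise an alias (or an error). */
	return d_materialise_unique(dentry, inode);
}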
2744 static int prepend(char **buffer, int *buflen, const char *str, int namelen)
2748 return -ENAMETOOLONG;
2750 memcpy(*buffer, str, namelen);
2755 * prepend_name - prepend a pathname in front of current buffer pointer
2756 * @buffer: buffer pointer
2757 * @buflen: allocated length of the buffer
2758 * @name: name string and length qstr structure
2760 * With RCU path tracing, it may race with d_move(). Use ACCESS_ONCE() to
2761 * make sure that either the old or the new name pointer and length are
2762 * fetched. However, there may be a mismatch between the length and the pointer.
2763 * The length cannot be trusted; we need to copy the name byte by byte until
2764 * the length is reached or a null byte is found. It also prepends "/" at
2765 * the beginning of the name. The sequence number check at the caller will
2766 * retry it again when a d_move() does happen. So any garbage in the buffer
2767 * due to mismatched pointer and length will be discarded.
2769 static int prepend_name(char **buffer, int *buflen, struct qstr *name)
2771 const char *dname = ACCESS_ONCE(name->name);
2772 u32 dlen = ACCESS_ONCE(name->len);
2775 if (*buflen < dlen + 1)
2776 return -ENAMETOOLONG;
2777 *buflen -= dlen + 1;
2778 p = *buffer -= dlen + 1;
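	/*
	 * Sketch of the copy described in the comment above (not verbatim
	 * from this file): prepend the '/' and copy at most dlen bytes,
	 * stopping early at a NUL in case name->name raced with d_move().
	 */
	*p++ = '/';
	while (dlen--) {
		char c = *dname++;
		if (!c)
			break;
		*p++ = c;
	}
	return 0;
}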
2790 * prepend_path - Prepend path string to a buffer
2791 * @path: the dentry/vfsmount to report
2792 * @root: root vfsmnt/dentry
2793 * @buffer: pointer to the end of the buffer
2794 * @buflen: pointer to buffer length
2796 * The function tries to write out the pathname without taking any lock other
2797 * than the RCU read lock to make sure that dentries won't go away. It only
2798 * checks the sequence number of the global rename_lock as any change in the
2799 * dentry's d_seq will be preceded by changes in the rename_lock sequence
2800 * number. If the sequence number had been changed, it will restart the whole
2801 * pathname back-tracing sequence again. It performs a total of 3 trials of
2802 * lockless back-tracing sequences before falling back to take the rename_lock.
2805 static int prepend_path(const struct path *path,
2806 const struct path *root,
2807 char **buffer, int *buflen)
2809 struct dentry *dentry = path->dentry;
2810 struct vfsmount *vfsmnt = path->mnt;
2811 struct mount *mnt = real_mount(vfsmnt);
2821 read_seqbegin_or_lock(&rename_lock, &seq);
2822 while (dentry != root->dentry || vfsmnt != root->mnt) {
2823 struct dentry * parent;
2825 if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
2827 if (mnt_has_parent(mnt)) {
2828 dentry = mnt->mnt_mountpoint;
2829 mnt = mnt->mnt_parent;
2834 * Filesystems needing to implement special "root names"
2835 * should do so with ->d_dname()
2837 if (IS_ROOT(dentry) &&
2838 (dentry->d_name.len != 1 ||
2839 dentry->d_name.name[0] != '/')) {
2840 WARN(1, "Root dentry has weird name <%.*s>\n",
2841 (int) dentry->d_name.len,
2842 dentry->d_name.name);
2845 error = is_mounted(vfsmnt) ? 1 : 2;
2848 parent = dentry->d_parent;
2850 error = prepend_name(&bptr, &blen, &dentry->d_name);
2858 if (need_seqretry(&rename_lock, seq)) {
2862 done_seqretry(&rename_lock, seq);
2864 if (error >= 0 && bptr == *buffer) {
2866 error = -ENAMETOOLONG;
2876 * __d_path - return the path of a dentry
2877 * @path: the dentry/vfsmount to report
2878 * @root: root vfsmnt/dentry
2879 * @buf: buffer to return value in
2880 * @buflen: buffer length
2882 * Convert a dentry into an ASCII path name.
2884 * Returns a pointer into the buffer or an error code if the
2885 * path was too long.
2887 * "buflen" should be positive.
2889 * If the path is not reachable from the supplied root, return %NULL.
2891 char *__d_path(const struct path *path,
2892 const struct path *root,
2893 char *buf, int buflen)
2895 char *res = buf + buflen;
2898 prepend(&res, &buflen, "\0", 1);
2899 br_read_lock(&vfsmount_lock);
2900 error = prepend_path(path, root, &res, &buflen);
2901 br_read_unlock(&vfsmount_lock);
2904 return ERR_PTR(error);
2910 char *d_absolute_path(const struct path *path,
2911 char *buf, int buflen)
2913 struct path root = {};
2914 char *res = buf + buflen;
2917 prepend(&res, &buflen, "\0", 1);
2918 br_read_lock(&vfsmount_lock);
2919 error = prepend_path(path, &root, &res, &buflen);
2920 br_read_unlock(&vfsmount_lock);
2925 return ERR_PTR(error);
2930 * same as __d_path but appends "(deleted)" for unlinked files.
2932 static int path_with_deleted(const struct path *path,
2933 const struct path *root,
2934 char **buf, int *buflen)
2936 prepend(buf, buflen, "\0", 1);
2937 if (d_unlinked(path->dentry)) {
2938 int error = prepend(buf, buflen, " (deleted)", 10);
2943 return prepend_path(path, root, buf, buflen);
2946 static int prepend_unreachable(char **buffer, int *buflen)
2948 return prepend(buffer, buflen, "(unreachable)", 13);
2952 * d_path - return the path of a dentry
2953 * @path: path to report
2954 * @buf: buffer to return value in
2955 * @buflen: buffer length
2957 * Convert a dentry into an ASCII path name. If the entry has been deleted
2958 * the string " (deleted)" is appended. Note that this is ambiguous.
2960 * Returns a pointer into the buffer or an error code if the path was
2961 * too long. Note: Callers should use the returned pointer, not the passed
2962 * in buffer, to use the name! The implementation often starts at an offset
2963 * into the buffer, and may leave 0 bytes at the start.
2965 * "buflen" should be positive.
2967 char *d_path(const struct path *path, char *buf, int buflen)
2969 char *res = buf + buflen;
2974 * We have various synthetic filesystems that never get mounted. On
2975 * these filesystems dentries are never used for lookup purposes, and
2976 * thus don't need to be hashed. They also don't need a name until a
2977 * user wants to identify the object in /proc/pid/fd/. The little hack
2978 * below allows us to generate a name for these objects on demand:
2980 if (path->dentry->d_op && path->dentry->d_op->d_dname)
2981 return path->dentry->d_op->d_dname(path->dentry, buf, buflen);
2983 get_fs_root(current->fs, &root);
2984 br_read_lock(&vfsmount_lock);
2985 error = path_with_deleted(path, &root, &res, &buflen);
2986 br_read_unlock(&vfsmount_lock);
2988 res = ERR_PTR(error);
2992 EXPORT_SYMBOL(d_path);
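/*
 * Illustrative sketch, not part of dcache.c: typical d_path() use.  Note
 * that the returned pointer, not the start of the buffer, is what gets
 * printed, as the comment above insists.
 */
static void example_print_path(const struct path *path)
{
	char *buf = (char *)__get_free_page(GFP_KERNEL);
	char *p;

	if (!buf)
		return;
	p = d_path(path, buf, PAGE_SIZE);
	if (!IS_ERR(p))
		printk(KERN_INFO "path: %s\n", p);
	free_page((unsigned long)buf);
}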
2995 * Helper function for dentry_operations.d_dname() members
2997 char *dynamic_dname(struct dentry *dentry, char *buffer, int buflen,
2998 const char *fmt, ...)
3004 va_start(args, fmt);
3005 sz = vsnprintf(temp, sizeof(temp), fmt, args) + 1;
3008 if (sz > sizeof(temp) || sz > buflen)
3009 return ERR_PTR(-ENAMETOOLONG);
3011 buffer += buflen - sz;
3012 return memcpy(buffer, temp, sz);
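/*
 * Illustrative example, not part of dcache.c: this is (roughly) how pipefs
 * implements ->d_dname() for its anonymous dentries using the helper above.
 */
static char *example_pipefs_dname(struct dentry *dentry, char *buffer,
				  int buflen)
{
	return dynamic_dname(dentry, buffer, buflen, "pipe:[%lu]",
				dentry->d_inode->i_ino);
}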
3015 char *simple_dname(struct dentry *dentry, char *buffer, int buflen)
3017 char *end = buffer + buflen;
3018 /* these dentries are never renamed, so d_lock is not needed */
3019 if (prepend(&end, &buflen, " (deleted)", 11) ||
3020 prepend(&end, &buflen, dentry->d_name.name, dentry->d_name.len) ||
3021 prepend(&end, &buflen, "/", 1))
3022 end = ERR_PTR(-ENAMETOOLONG);
3027 * Write full pathname from the root of the filesystem into the buffer.
3029 static char *__dentry_path(struct dentry *dentry, char *buf, int buflen)
3039 prepend(&end, &len, "\0", 1);
3045 read_seqbegin_or_lock(&rename_lock, &seq);
3046 while (!IS_ROOT(dentry)) {
3047 struct dentry *parent = dentry->d_parent;
3051 error = prepend_name(&end, &len, &dentry->d_name);
3060 if (need_seqretry(&rename_lock, seq)) {
3064 done_seqretry(&rename_lock, seq);
3069 return ERR_PTR(-ENAMETOOLONG);
3072 char *dentry_path_raw(struct dentry *dentry, char *buf, int buflen)
3074 return __dentry_path(dentry, buf, buflen);
3076 EXPORT_SYMBOL(dentry_path_raw);
3078 char *dentry_path(struct dentry *dentry, char *buf, int buflen)
3083 if (d_unlinked(dentry)) {
3085 if (prepend(&p, &buflen, "//deleted", 10) != 0)
3089 retval = __dentry_path(dentry, buf, buflen);
3090 if (!IS_ERR(retval) && p)
3091 *p = '/'; /* restore '/' overridden with '\0' */
3094 return ERR_PTR(-ENAMETOOLONG);
3098 * NOTE! The user-level library version returns a
3099 * character pointer. The kernel system call just
3100 * returns the length of the buffer filled (which
3101 * includes the ending '\0' character), or a negative
3102 * error value. So libc would do something like
3104 * char *getcwd(char *buf, size_t size)
 * {
 *	int retval;
 *
3108 *	retval = sys_getcwd(buf, size);
 *	if (retval >= 0)
 *		return buf;
 *	errno = -retval;
 *	return NULL;
 * }
3115 SYSCALL_DEFINE2(getcwd, char __user *, buf, unsigned long, size)
3118 struct path pwd, root;
3119 char *page = (char *) __get_free_page(GFP_USER);
3124 get_fs_root_and_pwd(current->fs, &root, &pwd);
3127 br_read_lock(&vfsmount_lock);
3128 if (!d_unlinked(pwd.dentry)) {
3130 char *cwd = page + PAGE_SIZE;
3131 int buflen = PAGE_SIZE;
3133 prepend(&cwd, &buflen, "\0", 1);
3134 error = prepend_path(&pwd, &root, &cwd, &buflen);
3135 br_read_unlock(&vfsmount_lock);
3140 /* Unreachable from current root */
3142 error = prepend_unreachable(&cwd, &buflen);
3148 len = PAGE_SIZE + page - cwd;
3151 if (copy_to_user(buf, cwd, len))
3155 br_read_unlock(&vfsmount_lock);
3161 free_page((unsigned long) page);
3166 * Test whether new_dentry is a subdirectory of old_dentry.
3168 * Trivially implemented using the dcache structure
3172 * is_subdir - is new dentry a subdirectory of old_dentry
3173 * @new_dentry: new dentry
3174 * @old_dentry: old dentry
3176 * Returns 1 if new_dentry is a subdirectory of old_dentry (at any depth).
3177 * Returns 0 otherwise.
3178 * Caller must ensure that "new_dentry" is pinned before calling is_subdir()
3181 int is_subdir(struct dentry *new_dentry, struct dentry *old_dentry)
3186 if (new_dentry == old_dentry)
3190 /* for restarting inner loop in case of seq retry */
3191 seq = read_seqbegin(&rename_lock);
3193 * Need rcu_read_lock() to protect against d_parent changing due to a concurrent d_move()
3197 if (d_ancestor(old_dentry, new_dentry))
3202 } while (read_seqretry(&rename_lock, seq));
3207 static enum d_walk_ret d_genocide_kill(void *data, struct dentry *dentry)
3209 struct dentry *root = data;
3210 if (dentry != root) {
3211 if (d_unhashed(dentry) || !dentry->d_inode)
3214 if (!(dentry->d_flags & DCACHE_GENOCIDE)) {
3215 dentry->d_flags |= DCACHE_GENOCIDE;
3216 dentry->d_lockref.count--;
3219 return D_WALK_CONTINUE;
3222 void d_genocide(struct dentry *parent)
3224 d_walk(parent, parent, d_genocide_kill, NULL);
3227 void d_tmpfile(struct dentry *dentry, struct inode *inode)
3229 inode_dec_link_count(inode);
3230 BUG_ON(dentry->d_name.name != dentry->d_iname ||
3231 !hlist_unhashed(&dentry->d_alias) ||
3232 !d_unlinked(dentry));
3233 spin_lock(&dentry->d_parent->d_lock);
3234 spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
3235 dentry->d_name.len = sprintf(dentry->d_iname, "#%llu",
3236 (unsigned long long)inode->i_ino);
3237 spin_unlock(&dentry->d_lock);
3238 spin_unlock(&dentry->d_parent->d_lock);
3239 d_instantiate(dentry, inode);
3241 EXPORT_SYMBOL(d_tmpfile);
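/*
 * Illustrative sketch, not part of dcache.c: the shape of a filesystem's
 * ->tmpfile() method.  The inode allocation helper is hypothetical; the
 * point is that d_tmpfile() drops the new inode's link count and gives it
 * an anonymous "#<ino>" name on the supplied dentry.
 */
static int example_tmpfile(struct inode *dir, struct dentry *dentry,
			   umode_t mode)
{
	struct inode *inode = example_fs_new_inode(dir, mode);

	if (IS_ERR(inode))
		return PTR_ERR(inode);
	d_tmpfile(dentry, inode);
	return 0;
}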
3243 static __initdata unsigned long dhash_entries;
3244 static int __init set_dhash_entries(char *str)
3248 dhash_entries = simple_strtoul(str, &str, 0);
3251 __setup("dhash_entries=", set_dhash_entries);
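/*
 * Illustrative note, not part of dcache.c: the hash size can be pre-set on
 * the kernel command line, e.g.
 *
 *	dhash_entries=65536
 *
 * in which case that value is fed to alloc_large_system_hash() below
 * instead of sizing the table from available memory.
 */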
3253 static void __init dcache_init_early(void)
3257 /* If hashes are distributed across NUMA nodes, defer
3258 * hash allocation until vmalloc space is available.
3264 alloc_large_system_hash("Dentry cache",
3265 sizeof(struct hlist_bl_head),
3274 for (loop = 0; loop < (1U << d_hash_shift); loop++)
3275 INIT_HLIST_BL_HEAD(dentry_hashtable + loop);
3278 static void __init dcache_init(void)
3283 * A constructor could be added for stable state like the lists,
3284 * but it is probably not worth it because of the cache nature of the dcache.
3287 dentry_cache = KMEM_CACHE(dentry,
3288 SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|SLAB_MEM_SPREAD);
3290 /* Hash may have been set up in dcache_init_early */
3295 alloc_large_system_hash("Dentry cache",
3296 sizeof(struct hlist_bl_head),
3305 for (loop = 0; loop < (1U << d_hash_shift); loop++)
3306 INIT_HLIST_BL_HEAD(dentry_hashtable + loop);
3309 /* SLAB cache for __getname() consumers */
3310 struct kmem_cache *names_cachep __read_mostly;
3311 EXPORT_SYMBOL(names_cachep);
3313 EXPORT_SYMBOL(d_genocide);
3315 void __init vfs_caches_init_early(void)
3317 dcache_init_early();
3321 void __init vfs_caches_init(unsigned long mempages)
3323 unsigned long reserve;
3325 /* Base hash sizes on available memory, with a reserve equal to
3326 150% of current kernel size */
3328 reserve = min((mempages - nr_free_pages()) * 3/2, mempages - 1);
3329 mempages -= reserve;
3331 names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
3332 SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
3336 files_init(mempages);