/*
 *  linux/fs/namespace.c
 *
 *  (C) Copyright Al Viro 2000, 2001
 *	Released under GPL v2.
 *
 * Based on code from fs/super.c, copyright Linus Torvalds and others.
 * Heavily rewritten.
 */
#include <linux/syscalls.h>
#include <linux/export.h>
#include <linux/capability.h>
#include <linux/mnt_namespace.h>
#include <linux/user_namespace.h>
#include <linux/namei.h>
#include <linux/security.h>
#include <linux/idr.h>
#include <linux/acct.h>		/* acct_auto_close_mnt */
#include <linux/init.h>		/* init_rootfs */
#include <linux/fs_struct.h>	/* get_fs_root et al. */
#include <linux/fsnotify.h>	/* fsnotify_vfsmount_delete */
#include <linux/uaccess.h>
#include <linux/proc_ns.h>
#include <linux/magic.h>
#include <linux/bootmem.h>

#include "pnode.h"
#include "internal.h"
30 static unsigned int m_hash_mask __read_mostly;
31 static unsigned int m_hash_shift __read_mostly;
32 static unsigned int mp_hash_mask __read_mostly;
33 static unsigned int mp_hash_shift __read_mostly;
static __initdata unsigned long mhash_entries;
static int __init set_mhash_entries(char *str)
{
	if (!str)
		return 0;
	mhash_entries = simple_strtoul(str, &str, 0);
	return 1;
}
__setup("mhash_entries=", set_mhash_entries);
static __initdata unsigned long mphash_entries;
static int __init set_mphash_entries(char *str)
{
	if (!str)
		return 0;
	mphash_entries = simple_strtoul(str, &str, 0);
	return 1;
}
__setup("mphash_entries=", set_mphash_entries);
56 static DEFINE_IDA(mnt_id_ida);
57 static DEFINE_IDA(mnt_group_ida);
58 static DEFINE_SPINLOCK(mnt_id_lock);
59 static int mnt_id_start = 0;
60 static int mnt_group_start = 1;
62 static struct hlist_head *mount_hashtable __read_mostly;
63 static struct hlist_head *mountpoint_hashtable __read_mostly;
64 static struct kmem_cache *mnt_cache __read_mostly;
65 static DECLARE_RWSEM(namespace_sem);
68 struct kobject *fs_kobj;
69 EXPORT_SYMBOL_GPL(fs_kobj);
/*
 * vfsmount lock may be taken for read to prevent changes to the
 * vfsmount hash, ie. during mountpoint lookups or walking back
 * towards the root.
 *
 * It should be taken for write in all cases where the vfsmount
 * tree or hash is modified or when a vfsmount structure is modified.
 */
79 __cacheline_aligned_in_smp DEFINE_SEQLOCK(mount_lock);
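/*
 * For reference, a sketch (not part of the original file) of how the
 * boot parameters above are consumed: mnt_init() sizes the two hash
 * tables with alloc_large_system_hash(), which also fills in the
 * shift/mask pairs used by m_hash()/mp_hash() below, and then
 * INIT_HLIST_HEAD()s every bucket. Illustrative only; the real
 * initialization lives in mnt_init(), outside this excerpt.
 */
static void __init example_mnt_hash_init(void)
{
	mount_hashtable = alloc_large_system_hash("Mount-cache",
				sizeof(struct hlist_head), mhash_entries,
				19, 0, &m_hash_shift, &m_hash_mask, 0, 0);
	mountpoint_hashtable = alloc_large_system_hash("Mountpoint-cache",
				sizeof(struct hlist_head), mphash_entries,
				19, 0, &mp_hash_shift, &mp_hash_mask, 0, 0);
}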
static inline struct hlist_head *m_hash(struct vfsmount *mnt, struct dentry *dentry)
{
	unsigned long tmp = ((unsigned long)mnt / L1_CACHE_BYTES);
	tmp += ((unsigned long)dentry / L1_CACHE_BYTES);
	tmp = tmp + (tmp >> m_hash_shift);
	return &mount_hashtable[tmp & m_hash_mask];
}

static inline struct hlist_head *mp_hash(struct dentry *dentry)
{
	unsigned long tmp = ((unsigned long)dentry / L1_CACHE_BYTES);
	tmp = tmp + (tmp >> mp_hash_shift);
	return &mountpoint_hashtable[tmp & mp_hash_mask];
}
/*
 * allocation is serialized by namespace_sem, but we need the spinlock to
 * serialize with freeing.
 */
static int mnt_alloc_id(struct mount *mnt)
{
	int res;

retry:
	ida_pre_get(&mnt_id_ida, GFP_KERNEL);
	spin_lock(&mnt_id_lock);
	res = ida_get_new_above(&mnt_id_ida, mnt_id_start, &mnt->mnt_id);
	if (!res)
		mnt_id_start = mnt->mnt_id + 1;
	spin_unlock(&mnt_id_lock);
	if (res == -EAGAIN)
		goto retry;

	return res;
}
static void mnt_free_id(struct mount *mnt)
{
	int id = mnt->mnt_id;
	spin_lock(&mnt_id_lock);
	ida_remove(&mnt_id_ida, id);
	if (mnt_id_start > id)
		mnt_id_start = id;
	spin_unlock(&mnt_id_lock);
}
/*
 * Allocate a new peer group ID
 *
 * mnt_group_ida is protected by namespace_sem
 */
static int mnt_alloc_group_id(struct mount *mnt)
{
	int res;

	if (!ida_pre_get(&mnt_group_ida, GFP_KERNEL))
		return -ENOMEM;

	res = ida_get_new_above(&mnt_group_ida,
				mnt_group_start,
				&mnt->mnt_group_id);
	if (!res)
		mnt_group_start = mnt->mnt_group_id + 1;

	return res;
}
/*
 * Release a peer group ID
 */
void mnt_release_group_id(struct mount *mnt)
{
	int id = mnt->mnt_group_id;
	ida_remove(&mnt_group_ida, id);
	if (mnt_group_start > id)
		mnt_group_start = id;
	mnt->mnt_group_id = 0;
}
/*
 * vfsmount lock must be held for read
 */
static inline void mnt_add_count(struct mount *mnt, int n)
{
#ifdef CONFIG_SMP
	this_cpu_add(mnt->mnt_pcp->mnt_count, n);
#else
	preempt_disable();
	mnt->mnt_count += n;
	preempt_enable();
#endif
}

/*
 * vfsmount lock must be held for write
 */
unsigned int mnt_get_count(struct mount *mnt)
{
#ifdef CONFIG_SMP
	unsigned int count = 0;
	int cpu;

	for_each_possible_cpu(cpu) {
		count += per_cpu_ptr(mnt->mnt_pcp, cpu)->mnt_count;
	}

	return count;
#else
	return mnt->mnt_count;
#endif
}
static struct mount *alloc_vfsmnt(const char *name)
{
	struct mount *mnt = kmem_cache_zalloc(mnt_cache, GFP_KERNEL);
	if (mnt) {
		int err;

		err = mnt_alloc_id(mnt);
		if (err)
			goto out_free_cache;

		if (name) {
			mnt->mnt_devname = kstrdup(name, GFP_KERNEL);
			if (!mnt->mnt_devname)
				goto out_free_id;
		}

#ifdef CONFIG_SMP
		mnt->mnt_pcp = alloc_percpu(struct mnt_pcp);
		if (!mnt->mnt_pcp)
			goto out_free_devname;

		this_cpu_add(mnt->mnt_pcp->mnt_count, 1);
#else
		mnt->mnt_writers = 0;
#endif

		INIT_HLIST_NODE(&mnt->mnt_hash);
		INIT_LIST_HEAD(&mnt->mnt_child);
		INIT_LIST_HEAD(&mnt->mnt_mounts);
		INIT_LIST_HEAD(&mnt->mnt_list);
		INIT_LIST_HEAD(&mnt->mnt_expire);
		INIT_LIST_HEAD(&mnt->mnt_share);
		INIT_LIST_HEAD(&mnt->mnt_slave_list);
		INIT_LIST_HEAD(&mnt->mnt_slave);
#ifdef CONFIG_FSNOTIFY
		INIT_HLIST_HEAD(&mnt->mnt_fsnotify_marks);
#endif
	}
	return mnt;

#ifdef CONFIG_SMP
out_free_devname:
	kfree(mnt->mnt_devname);
#endif
out_free_id:
	mnt_free_id(mnt);
out_free_cache:
	kmem_cache_free(mnt_cache, mnt);
	return NULL;
}
/*
 * Most r/o checks on a fs are for operations that take
 * discrete amounts of time, like a write() or unlink().
 * We must keep track of when those operations start
 * (for permission checks) and when they end, so that
 * we can determine when writes are able to occur to
 * a filesystem.
 */
/*
 * __mnt_is_readonly: check whether a mount is read-only
 * @mnt: the mount to check for its write status
 *
 * This shouldn't be used directly outside of the VFS.
 * It does not guarantee that the filesystem will stay
 * r/w, just that it is right *now*. This cannot and
 * should not be used in place of IS_RDONLY(inode).
 * mnt_want/drop_write() will _keep_ the filesystem
 * r/w.
 */
int __mnt_is_readonly(struct vfsmount *mnt)
{
	if (mnt->mnt_flags & MNT_READONLY)
		return 1;
	if (mnt->mnt_sb->s_flags & MS_RDONLY)
		return 1;
	return 0;
}
EXPORT_SYMBOL_GPL(__mnt_is_readonly);
static inline void mnt_inc_writers(struct mount *mnt)
{
#ifdef CONFIG_SMP
	this_cpu_inc(mnt->mnt_pcp->mnt_writers);
#else
	mnt->mnt_writers++;
#endif
}

static inline void mnt_dec_writers(struct mount *mnt)
{
#ifdef CONFIG_SMP
	this_cpu_dec(mnt->mnt_pcp->mnt_writers);
#else
	mnt->mnt_writers--;
#endif
}

static unsigned int mnt_get_writers(struct mount *mnt)
{
#ifdef CONFIG_SMP
	unsigned int count = 0;
	int cpu;

	for_each_possible_cpu(cpu) {
		count += per_cpu_ptr(mnt->mnt_pcp, cpu)->mnt_writers;
	}

	return count;
#else
	return mnt->mnt_writers;
#endif
}
static int mnt_is_readonly(struct vfsmount *mnt)
{
	if (mnt->mnt_sb->s_readonly_remount)
		return 1;
	/* Order wrt setting s_flags/s_readonly_remount in do_remount() */
	smp_rmb();
	return __mnt_is_readonly(mnt);
}
/*
 * Most r/o & frozen checks on a fs are for operations that take discrete
 * amounts of time, like a write() or unlink(). We must keep track of when
 * those operations start (for permission checks) and when they end, so that we
 * can determine when writes are able to occur to a filesystem.
 */
/**
 * __mnt_want_write - get write access to a mount without freeze protection
 * @m: the mount on which to take a write
 *
 * This tells the low-level filesystem that a write is about to be performed to
 * it, and makes sure that writes are allowed (mount is read-write) before
 * returning success. This operation does not protect against the filesystem
 * being frozen. When the write operation is finished, __mnt_drop_write() must
 * be called. This is effectively a refcount.
 */
int __mnt_want_write(struct vfsmount *m)
{
	struct mount *mnt = real_mount(m);
	int ret = 0;

	preempt_disable();
	mnt_inc_writers(mnt);
	/*
	 * The store to mnt_inc_writers must be visible before we pass
	 * MNT_WRITE_HOLD loop below, so that the slowpath can see our
	 * incremented count after it has set MNT_WRITE_HOLD.
	 */
	smp_mb();
	while (ACCESS_ONCE(mnt->mnt.mnt_flags) & MNT_WRITE_HOLD)
		cpu_relax();
	/*
	 * After the slowpath clears MNT_WRITE_HOLD, mnt_is_readonly will
	 * be set to match its requirements. So we must not load that until
	 * MNT_WRITE_HOLD is cleared.
	 */
	smp_rmb();
	if (mnt_is_readonly(m)) {
		mnt_dec_writers(mnt);
		ret = -EBUSY;
	}
	preempt_enable();

	return ret;
}
/**
 * mnt_want_write - get write access to a mount
 * @m: the mount on which to take a write
 *
 * This tells the low-level filesystem that a write is about to be performed to
 * it, and makes sure that writes are allowed (mount is read-write, filesystem
 * is not frozen) before returning success. When the write operation is
 * finished, mnt_drop_write() must be called. This is effectively a refcount.
 */
int mnt_want_write(struct vfsmount *m)
{
	int ret;

	sb_start_write(m->mnt_sb);
	ret = __mnt_want_write(m);
	if (ret)
		sb_end_write(m->mnt_sb);
	return ret;
}
382 EXPORT_SYMBOL_GPL(mnt_want_write);
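/*
 * Illustrative sketch (not part of the original file): the canonical
 * way a caller brackets a modification with the pair above. The
 * function name and the elided update step are hypothetical.
 */
static int example_modify_mount(struct path *path)
{
	int err;

	err = mnt_want_write(path->mnt);	/* may wait on a frozen sb */
	if (err)
		return err;
	/* ... create/unlink/write through path ... */
	mnt_drop_write(path->mnt);
	return 0;
}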
385 * mnt_clone_write - get write access to a mount
386 * @mnt: the mount on which to take a write
388 * This is effectively like mnt_want_write, except
389 * it must only be used to take an extra write reference
390 * on a mountpoint that we already know has a write reference
391 * on it. This allows some optimisation.
393 * After finished, mnt_drop_write must be called as usual to
394 * drop the reference.
396 int mnt_clone_write(struct vfsmount *mnt)
398 /* superblock may be r/o */
399 if (__mnt_is_readonly(mnt))
402 mnt_inc_writers(real_mount(mnt));
406 EXPORT_SYMBOL_GPL(mnt_clone_write);
/**
 * __mnt_want_write_file - get write access to a file's mount
 * @file: the file whose mount is to be written to
 *
 * This is like __mnt_want_write, but it takes a file and can
 * do some optimisations if the file is open for write already.
 */
415 int __mnt_want_write_file(struct file *file)
417 struct inode *inode = file_inode(file);
419 if (!(file->f_mode & FMODE_WRITE) || special_file(inode->i_mode))
420 return __mnt_want_write(file->f_path.mnt);
422 return mnt_clone_write(file->f_path.mnt);
/**
 * mnt_want_write_file - get write access to a file's mount
 * @file: the file whose mount is to be written to
 *
 * This is like mnt_want_write, but it takes a file and can
 * do some optimisations if the file is open for write already.
 */
int mnt_want_write_file(struct file *file)
{
	int ret;

	sb_start_write(file->f_path.mnt->mnt_sb);
	ret = __mnt_want_write_file(file);
	if (ret)
		sb_end_write(file->f_path.mnt->mnt_sb);
	return ret;
}
442 EXPORT_SYMBOL_GPL(mnt_want_write_file);
445 * __mnt_drop_write - give up write access to a mount
446 * @mnt: the mount on which to give up write access
448 * Tells the low-level filesystem that we are done
449 * performing writes to it. Must be matched with
450 * __mnt_want_write() call above.
452 void __mnt_drop_write(struct vfsmount *mnt)
455 mnt_dec_writers(real_mount(mnt));
460 * mnt_drop_write - give up write access to a mount
461 * @mnt: the mount on which to give up write access
463 * Tells the low-level filesystem that we are done performing writes to it and
464 * also allows filesystem to be frozen again. Must be matched with
465 * mnt_want_write() call above.
467 void mnt_drop_write(struct vfsmount *mnt)
469 __mnt_drop_write(mnt);
470 sb_end_write(mnt->mnt_sb);
472 EXPORT_SYMBOL_GPL(mnt_drop_write);
474 void __mnt_drop_write_file(struct file *file)
476 __mnt_drop_write(file->f_path.mnt);
479 void mnt_drop_write_file(struct file *file)
481 mnt_drop_write(file->f_path.mnt);
483 EXPORT_SYMBOL(mnt_drop_write_file);
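/*
 * Illustrative sketch (not part of the original file): the usual
 * ioctl-style pattern for the file-based helpers above. The handler
 * and its update step are hypothetical.
 */
static long example_ioctl_setflags(struct file *file, unsigned long arg)
{
	long err = mnt_want_write_file(file);

	if (err)
		return err;
	/* ... update file_inode(file) under its i_mutex ... */
	mnt_drop_write_file(file);
	return err;
}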
485 static int mnt_make_readonly(struct mount *mnt)
490 mnt->mnt.mnt_flags |= MNT_WRITE_HOLD;
492 * After storing MNT_WRITE_HOLD, we'll read the counters. This store
493 * should be visible before we do.
498 * With writers on hold, if this value is zero, then there are
499 * definitely no active writers (although held writers may subsequently
500 * increment the count, they'll have to wait, and decrement it after
501 * seeing MNT_READONLY).
503 * It is OK to have counter incremented on one CPU and decremented on
504 * another: the sum will add up correctly. The danger would be when we
505 * sum up each counter, if we read a counter before it is incremented,
506 * but then read another CPU's count which it has been subsequently
507 * decremented from -- we would see more decrements than we should.
508 * MNT_WRITE_HOLD protects against this scenario, because
509 * mnt_want_write first increments count, then smp_mb, then spins on
510 * MNT_WRITE_HOLD, so it can't be decremented by another CPU while
511 * we're counting up here.
513 if (mnt_get_writers(mnt) > 0)
516 mnt->mnt.mnt_flags |= MNT_READONLY;
518 * MNT_READONLY must become visible before ~MNT_WRITE_HOLD, so writers
519 * that become unheld will see MNT_READONLY.
522 mnt->mnt.mnt_flags &= ~MNT_WRITE_HOLD;
527 static void __mnt_unmake_readonly(struct mount *mnt)
530 mnt->mnt.mnt_flags &= ~MNT_READONLY;
534 int sb_prepare_remount_readonly(struct super_block *sb)
539 /* Racy optimization. Recheck the counter under MNT_WRITE_HOLD */
540 if (atomic_long_read(&sb->s_remove_count))
544 list_for_each_entry(mnt, &sb->s_mounts, mnt_instance) {
545 if (!(mnt->mnt.mnt_flags & MNT_READONLY)) {
546 mnt->mnt.mnt_flags |= MNT_WRITE_HOLD;
548 if (mnt_get_writers(mnt) > 0) {
554 if (!err && atomic_long_read(&sb->s_remove_count))
558 sb->s_readonly_remount = 1;
561 list_for_each_entry(mnt, &sb->s_mounts, mnt_instance) {
562 if (mnt->mnt.mnt_flags & MNT_WRITE_HOLD)
563 mnt->mnt.mnt_flags &= ~MNT_WRITE_HOLD;
570 static void free_vfsmnt(struct mount *mnt)
572 kfree(mnt->mnt_devname);
575 free_percpu(mnt->mnt_pcp);
577 kmem_cache_free(mnt_cache, mnt);
/* call under rcu_read_lock */
bool legitimize_mnt(struct vfsmount *bastard, unsigned seq)
{
	struct mount *mnt;
	if (read_seqretry(&mount_lock, seq))
		return false;
	if (bastard == NULL)
		return true;
	mnt = real_mount(bastard);
	mnt_add_count(mnt, 1);
	if (likely(!read_seqretry(&mount_lock, seq)))
		return true;
	if (bastard->mnt_flags & MNT_SYNC_UMOUNT) {
		mnt_add_count(mnt, -1);
		return false;
	}
	rcu_read_unlock();
	mntput(bastard);
	rcu_read_lock();
	return false;
}
/*
 * find the first mount at @dentry on vfsmount @mnt.
 * call under rcu_read_lock()
 */
struct mount *__lookup_mnt(struct vfsmount *mnt, struct dentry *dentry)
{
	struct hlist_head *head = m_hash(mnt, dentry);
	struct mount *p;

	hlist_for_each_entry_rcu(p, head, mnt_hash)
		if (&p->mnt_parent->mnt == mnt && p->mnt_mountpoint == dentry)
			return p;
	return NULL;
}
618 * find the last mount at @dentry on vfsmount @mnt.
619 * mount_lock must be held.
621 struct mount *__lookup_mnt_last(struct vfsmount *mnt, struct dentry *dentry)
623 struct mount *p, *res;
624 res = p = __lookup_mnt(mnt, dentry);
627 hlist_for_each_entry_continue(p, mnt_hash) {
628 if (&p->mnt_parent->mnt != mnt || p->mnt_mountpoint != dentry)
/*
 * lookup_mnt - Return the first child mount mounted at path
 *
 * "First" means first mounted chronologically. If you create the
 * following mounts:
 *
 * mount /dev/sda1 /mnt
 * mount /dev/sda2 /mnt
 * mount /dev/sda3 /mnt
 *
 * Then lookup_mnt() on the base /mnt dentry in the root mount will
 * return successively the root dentry and vfsmount of /dev/sda1, then
 * /dev/sda2, then /dev/sda3, then NULL.
 *
 * lookup_mnt takes a reference to the found vfsmount.
 */
struct vfsmount *lookup_mnt(struct path *path)
{
	struct mount *child_mnt;
	struct vfsmount *m;
	unsigned seq;

	rcu_read_lock();
	do {
		seq = read_seqbegin(&mount_lock);
		child_mnt = __lookup_mnt(path->mnt, path->dentry);
		m = child_mnt ? &child_mnt->mnt : NULL;
	} while (!legitimize_mnt(m, seq));
	rcu_read_unlock();
	return m;
}
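/*
 * Illustrative sketch (not part of the original file): the typical
 * consumer of lookup_mnt() steps from a mountpoint to the mount on
 * top of it until nothing is mounted there any more, the way namei's
 * follow_mount() does. The function name is hypothetical.
 */
static void example_traverse_mounts(struct path *path)
{
	struct vfsmount *mounted;

	while ((mounted = lookup_mnt(path)) != NULL) {
		/* trade our refs for refs on the covering mount */
		dput(path->dentry);
		mntput(path->mnt);
		path->mnt = mounted;		/* ref came from lookup_mnt() */
		path->dentry = dget(mounted->mnt_root);
	}
}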
668 static struct mountpoint *new_mountpoint(struct dentry *dentry)
670 struct hlist_head *chain = mp_hash(dentry);
671 struct mountpoint *mp;
674 hlist_for_each_entry(mp, chain, m_hash) {
675 if (mp->m_dentry == dentry) {
676 /* might be worth a WARN_ON() */
677 if (d_unlinked(dentry))
678 return ERR_PTR(-ENOENT);
684 mp = kmalloc(sizeof(struct mountpoint), GFP_KERNEL);
686 return ERR_PTR(-ENOMEM);
688 ret = d_set_mounted(dentry);
694 mp->m_dentry = dentry;
696 hlist_add_head(&mp->m_hash, chain);
700 static void put_mountpoint(struct mountpoint *mp)
702 if (!--mp->m_count) {
703 struct dentry *dentry = mp->m_dentry;
704 spin_lock(&dentry->d_lock);
705 dentry->d_flags &= ~DCACHE_MOUNTED;
706 spin_unlock(&dentry->d_lock);
707 hlist_del(&mp->m_hash);
712 static inline int check_mnt(struct mount *mnt)
714 return mnt->mnt_ns == current->nsproxy->mnt_ns;
718 * vfsmount lock must be held for write
720 static void touch_mnt_namespace(struct mnt_namespace *ns)
724 wake_up_interruptible(&ns->poll);
729 * vfsmount lock must be held for write
731 static void __touch_mnt_namespace(struct mnt_namespace *ns)
733 if (ns && ns->event != event) {
735 wake_up_interruptible(&ns->poll);
740 * vfsmount lock must be held for write
742 static void detach_mnt(struct mount *mnt, struct path *old_path)
744 old_path->dentry = mnt->mnt_mountpoint;
745 old_path->mnt = &mnt->mnt_parent->mnt;
746 mnt->mnt_parent = mnt;
747 mnt->mnt_mountpoint = mnt->mnt.mnt_root;
748 list_del_init(&mnt->mnt_child);
749 hlist_del_init_rcu(&mnt->mnt_hash);
750 put_mountpoint(mnt->mnt_mp);
755 * vfsmount lock must be held for write
757 void mnt_set_mountpoint(struct mount *mnt,
758 struct mountpoint *mp,
759 struct mount *child_mnt)
762 mnt_add_count(mnt, 1); /* essentially, that's mntget */
763 child_mnt->mnt_mountpoint = dget(mp->m_dentry);
764 child_mnt->mnt_parent = mnt;
765 child_mnt->mnt_mp = mp;
769 * vfsmount lock must be held for write
771 static void attach_mnt(struct mount *mnt,
772 struct mount *parent,
773 struct mountpoint *mp)
775 mnt_set_mountpoint(parent, mp, mnt);
776 hlist_add_head_rcu(&mnt->mnt_hash, m_hash(&parent->mnt, mp->m_dentry));
777 list_add_tail(&mnt->mnt_child, &parent->mnt_mounts);
781 * vfsmount lock must be held for write
783 static void commit_tree(struct mount *mnt, struct mount *shadows)
785 struct mount *parent = mnt->mnt_parent;
788 struct mnt_namespace *n = parent->mnt_ns;
790 BUG_ON(parent == mnt);
792 list_add_tail(&head, &mnt->mnt_list);
793 list_for_each_entry(m, &head, mnt_list)
796 list_splice(&head, n->list.prev);
	if (shadows)
		hlist_add_after_rcu(&shadows->mnt_hash, &mnt->mnt_hash);
	else
		hlist_add_head_rcu(&mnt->mnt_hash,
				m_hash(&parent->mnt, mnt->mnt_mountpoint));
803 list_add_tail(&mnt->mnt_child, &parent->mnt_mounts);
804 touch_mnt_namespace(n);
807 static struct mount *next_mnt(struct mount *p, struct mount *root)
809 struct list_head *next = p->mnt_mounts.next;
810 if (next == &p->mnt_mounts) {
814 next = p->mnt_child.next;
815 if (next != &p->mnt_parent->mnt_mounts)
820 return list_entry(next, struct mount, mnt_child);
823 static struct mount *skip_mnt_tree(struct mount *p)
825 struct list_head *prev = p->mnt_mounts.prev;
826 while (prev != &p->mnt_mounts) {
827 p = list_entry(prev, struct mount, mnt_child);
828 prev = p->mnt_mounts.prev;
struct vfsmount *
vfs_kern_mount(struct file_system_type *type, int flags, const char *name, void *data)
{
	struct mount *mnt;
	struct dentry *root;

	if (!type)
		return ERR_PTR(-ENODEV);

	mnt = alloc_vfsmnt(name);
	if (!mnt)
		return ERR_PTR(-ENOMEM);

	if (flags & MS_KERNMOUNT)
		mnt->mnt.mnt_flags = MNT_INTERNAL;

	root = mount_fs(type, flags, name, data);
	if (IS_ERR(root)) {
		mnt_free_id(mnt);
		free_vfsmnt(mnt);
		return ERR_CAST(root);
	}

	mnt->mnt.mnt_root = root;
	mnt->mnt.mnt_sb = root->d_sb;
	mnt->mnt_mountpoint = mnt->mnt.mnt_root;
	mnt->mnt_parent = mnt;
	lock_mount_hash();
	list_add_tail(&mnt->mnt_instance, &root->d_sb->s_mounts);
	unlock_mount_hash();
	return &mnt->mnt;
}
EXPORT_SYMBOL_GPL(vfs_kern_mount);
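/*
 * Illustrative sketch (not part of the original file): kern_mount()
 * style use of vfs_kern_mount() for an internal mount that userspace
 * never sees (MS_KERNMOUNT sets MNT_INTERNAL above). Hypothetical
 * helper name; the type would come from register_filesystem().
 */
static struct vfsmount *example_kern_mount(struct file_system_type *type)
{
	struct vfsmount *mnt = vfs_kern_mount(type, MS_KERNMOUNT,
					      type->name, NULL);
	/* on success the caller owns one reference; mntput() releases it */
	return mnt;
}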
866 static struct mount *clone_mnt(struct mount *old, struct dentry *root,
869 struct super_block *sb = old->mnt.mnt_sb;
873 mnt = alloc_vfsmnt(old->mnt_devname);
875 return ERR_PTR(-ENOMEM);
877 if (flag & (CL_SLAVE | CL_PRIVATE | CL_SHARED_TO_SLAVE))
878 mnt->mnt_group_id = 0; /* not a peer of original */
880 mnt->mnt_group_id = old->mnt_group_id;
882 if ((flag & CL_MAKE_SHARED) && !mnt->mnt_group_id) {
883 err = mnt_alloc_group_id(mnt);
888 mnt->mnt.mnt_flags = old->mnt.mnt_flags & ~(MNT_WRITE_HOLD|MNT_MARKED);
889 /* Don't allow unprivileged users to change mount flags */
890 if (flag & CL_UNPRIVILEGED) {
891 mnt->mnt.mnt_flags |= MNT_LOCK_ATIME;
893 if (mnt->mnt.mnt_flags & MNT_READONLY)
894 mnt->mnt.mnt_flags |= MNT_LOCK_READONLY;
896 if (mnt->mnt.mnt_flags & MNT_NODEV)
897 mnt->mnt.mnt_flags |= MNT_LOCK_NODEV;
899 if (mnt->mnt.mnt_flags & MNT_NOSUID)
900 mnt->mnt.mnt_flags |= MNT_LOCK_NOSUID;
902 if (mnt->mnt.mnt_flags & MNT_NOEXEC)
903 mnt->mnt.mnt_flags |= MNT_LOCK_NOEXEC;
906 /* Don't allow unprivileged users to reveal what is under a mount */
907 if ((flag & CL_UNPRIVILEGED) && list_empty(&old->mnt_expire))
908 mnt->mnt.mnt_flags |= MNT_LOCKED;
910 atomic_inc(&sb->s_active);
911 mnt->mnt.mnt_sb = sb;
912 mnt->mnt.mnt_root = dget(root);
913 mnt->mnt_mountpoint = mnt->mnt.mnt_root;
914 mnt->mnt_parent = mnt;
916 list_add_tail(&mnt->mnt_instance, &sb->s_mounts);
919 if ((flag & CL_SLAVE) ||
920 ((flag & CL_SHARED_TO_SLAVE) && IS_MNT_SHARED(old))) {
921 list_add(&mnt->mnt_slave, &old->mnt_slave_list);
922 mnt->mnt_master = old;
923 CLEAR_MNT_SHARED(mnt);
924 } else if (!(flag & CL_PRIVATE)) {
925 if ((flag & CL_MAKE_SHARED) || IS_MNT_SHARED(old))
926 list_add(&mnt->mnt_share, &old->mnt_share);
927 if (IS_MNT_SLAVE(old))
928 list_add(&mnt->mnt_slave, &old->mnt_slave);
929 mnt->mnt_master = old->mnt_master;
931 if (flag & CL_MAKE_SHARED)
934 /* stick the duplicate mount on the same expiry list
935 * as the original if that was on one */
936 if (flag & CL_EXPIRE) {
937 if (!list_empty(&old->mnt_expire))
938 list_add(&mnt->mnt_expire, &old->mnt_expire);
948 static void delayed_free(struct rcu_head *head)
950 struct mount *mnt = container_of(head, struct mount, mnt_rcu);
951 kfree(mnt->mnt_devname);
953 free_percpu(mnt->mnt_pcp);
955 kmem_cache_free(mnt_cache, mnt);
958 static void mntput_no_expire(struct mount *mnt)
962 mnt_add_count(mnt, -1);
963 if (likely(mnt->mnt_ns)) { /* shouldn't be the last one */
968 if (mnt_get_count(mnt)) {
973 if (unlikely(mnt->mnt_pinned)) {
974 mnt_add_count(mnt, mnt->mnt_pinned + 1);
978 acct_auto_close_mnt(&mnt->mnt);
981 if (unlikely(mnt->mnt.mnt_flags & MNT_DOOMED)) {
986 mnt->mnt.mnt_flags |= MNT_DOOMED;
989 list_del(&mnt->mnt_instance);
993 * This probably indicates that somebody messed
994 * up a mnt_want/drop_write() pair. If this
995 * happens, the filesystem was probably unable
996 * to make r/w->r/o transitions.
999 * The locking used to deal with mnt_count decrement provides barriers,
1000 * so mnt_get_writers() below is safe.
1002 WARN_ON(mnt_get_writers(mnt));
1003 fsnotify_vfsmount_delete(&mnt->mnt);
1004 dput(mnt->mnt.mnt_root);
1005 deactivate_super(mnt->mnt.mnt_sb);
1007 call_rcu(&mnt->mnt_rcu, delayed_free);
1010 void mntput(struct vfsmount *mnt)
1013 struct mount *m = real_mount(mnt);
1014 /* avoid cacheline pingpong, hope gcc doesn't get "smart" */
1015 if (unlikely(m->mnt_expiry_mark))
1016 m->mnt_expiry_mark = 0;
1017 mntput_no_expire(m);
1020 EXPORT_SYMBOL(mntput);
1022 struct vfsmount *mntget(struct vfsmount *mnt)
1025 mnt_add_count(real_mount(mnt), 1);
1028 EXPORT_SYMBOL(mntget);
1030 void mnt_pin(struct vfsmount *mnt)
1033 real_mount(mnt)->mnt_pinned++;
1034 unlock_mount_hash();
1036 EXPORT_SYMBOL(mnt_pin);
1038 void mnt_unpin(struct vfsmount *m)
1040 struct mount *mnt = real_mount(m);
1042 if (mnt->mnt_pinned) {
1043 mnt_add_count(mnt, 1);
1046 unlock_mount_hash();
1048 EXPORT_SYMBOL(mnt_unpin);
1050 static inline void mangle(struct seq_file *m, const char *s)
1052 seq_escape(m, s, " \t\n\\");
1056 * Simple .show_options callback for filesystems which don't want to
1057 * implement more complex mount option showing.
1059 * See also save_mount_options().
1061 int generic_show_options(struct seq_file *m, struct dentry *root)
1063 const char *options;
1066 options = rcu_dereference(root->d_sb->s_options);
1068 if (options != NULL && options[0]) {
1076 EXPORT_SYMBOL(generic_show_options);
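/*
 * Illustrative sketch (not part of the original file): minimal wiring
 * for the helper above, with hypothetical examplefs names. fill_super()
 * saves the option string (see save_mount_options() below) and
 * .show_options echoes it back.
 */
static int examplefs_fill_super(struct super_block *sb, void *data, int silent)
{
	save_mount_options(sb, data);
	/* ... allocate root inode, set sb->s_op = &examplefs_sops ... */
	return 0;
}

static const struct super_operations examplefs_sops = {
	.show_options	= generic_show_options,
};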
/*
 * If filesystem uses generic_show_options(), this function should be
 * called from the fill_super() callback.
 *
 * The .remount_fs callback usually needs to be handled in a special
 * way, to make sure that previous options are not overwritten if the
 * remount fails.
 *
 * Also note that if the filesystem's .remount_fs function doesn't
 * reset all options to their default value, but changes only newly
 * given options, then the displayed options will not reflect reality
 * any more.
 */
1091 void save_mount_options(struct super_block *sb, char *options)
1093 BUG_ON(sb->s_options);
1094 rcu_assign_pointer(sb->s_options, kstrdup(options, GFP_KERNEL));
1096 EXPORT_SYMBOL(save_mount_options);
1098 void replace_mount_options(struct super_block *sb, char *options)
1100 char *old = sb->s_options;
1101 rcu_assign_pointer(sb->s_options, options);
1107 EXPORT_SYMBOL(replace_mount_options);
1109 #ifdef CONFIG_PROC_FS
1110 /* iterator; we want it to have access to namespace_sem, thus here... */
1111 static void *m_start(struct seq_file *m, loff_t *pos)
1113 struct proc_mounts *p = proc_mounts(m);
1115 down_read(&namespace_sem);
1116 return seq_list_start(&p->ns->list, *pos);
1119 static void *m_next(struct seq_file *m, void *v, loff_t *pos)
1121 struct proc_mounts *p = proc_mounts(m);
1123 return seq_list_next(v, &p->ns->list, pos);
1126 static void m_stop(struct seq_file *m, void *v)
1128 up_read(&namespace_sem);
1131 static int m_show(struct seq_file *m, void *v)
1133 struct proc_mounts *p = proc_mounts(m);
1134 struct mount *r = list_entry(v, struct mount, mnt_list);
1135 return p->show(m, &r->mnt);
1138 const struct seq_operations mounts_op = {
1144 #endif /* CONFIG_PROC_FS */
1147 * may_umount_tree - check if a mount tree is busy
1148 * @mnt: root of mount tree
 * This is called to check if a tree of mounts has any
 * open files, pwds, chroots or sub mounts that are
 * busy.
 */
1154 int may_umount_tree(struct vfsmount *m)
1156 struct mount *mnt = real_mount(m);
1157 int actual_refs = 0;
1158 int minimum_refs = 0;
1162 /* write lock needed for mnt_get_count */
1164 for (p = mnt; p; p = next_mnt(p, mnt)) {
1165 actual_refs += mnt_get_count(p);
1168 unlock_mount_hash();
1170 if (actual_refs > minimum_refs)
1176 EXPORT_SYMBOL(may_umount_tree);
1179 * may_umount - check if a mount point is busy
1180 * @mnt: root of mount
1182 * This is called to check if a mount point has any
1183 * open files, pwds, chroots or sub mounts. If the
1184 * mount has sub mounts this will return busy
1185 * regardless of whether the sub mounts are busy.
1187 * Doesn't take quota and stuff into account. IOW, in some cases it will
1188 * give false negatives. The main reason why it's here is that we need
1189 * a non-destructive way to look for easily umountable filesystems.
1191 int may_umount(struct vfsmount *mnt)
1194 down_read(&namespace_sem);
1196 if (propagate_mount_busy(real_mount(mnt), 2))
1198 unlock_mount_hash();
1199 up_read(&namespace_sem);
1203 EXPORT_SYMBOL(may_umount);
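/*
 * Illustrative sketch (not part of the original file): automounters
 * (e.g. autofs-style expiry) use may_umount() as a non-destructive
 * busy probe before committing to a real unmount. Hypothetical name.
 */
static bool example_expirable(struct vfsmount *mnt)
{
	return may_umount(mnt) != 0;	/* nonzero: no users besides us */
}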
1205 static HLIST_HEAD(unmounted); /* protected by namespace_sem */
1207 static void namespace_unlock(void)
1210 struct hlist_head head = unmounted;
1212 if (likely(hlist_empty(&head))) {
1213 up_write(&namespace_sem);
1217 head.first->pprev = &head.first;
1218 INIT_HLIST_HEAD(&unmounted);
1220 up_write(&namespace_sem);
1224 while (!hlist_empty(&head)) {
1225 mnt = hlist_entry(head.first, struct mount, mnt_hash);
1226 hlist_del_init(&mnt->mnt_hash);
1227 if (mnt->mnt_ex_mountpoint.mnt)
1228 path_put(&mnt->mnt_ex_mountpoint);
1233 static inline void namespace_lock(void)
1235 down_write(&namespace_sem);
1239 * mount_lock must be held
1240 * namespace_sem must be held for write
1241 * how = 0 => just this tree, don't propagate
1242 * how = 1 => propagate; we know that nobody else has reference to any victims
1243 * how = 2 => lazy umount
1245 void umount_tree(struct mount *mnt, int how)
1247 HLIST_HEAD(tmp_list);
1249 struct mount *last = NULL;
1251 for (p = mnt; p; p = next_mnt(p, mnt)) {
1252 hlist_del_init_rcu(&p->mnt_hash);
1253 hlist_add_head(&p->mnt_hash, &tmp_list);
1257 propagate_umount(&tmp_list);
1259 hlist_for_each_entry(p, &tmp_list, mnt_hash) {
1260 list_del_init(&p->mnt_expire);
1261 list_del_init(&p->mnt_list);
1262 __touch_mnt_namespace(p->mnt_ns);
1265 p->mnt.mnt_flags |= MNT_SYNC_UMOUNT;
1266 list_del_init(&p->mnt_child);
1267 if (mnt_has_parent(p)) {
1268 put_mountpoint(p->mnt_mp);
1269 /* move the reference to mountpoint into ->mnt_ex_mountpoint */
1270 p->mnt_ex_mountpoint.dentry = p->mnt_mountpoint;
1271 p->mnt_ex_mountpoint.mnt = &p->mnt_parent->mnt;
1272 p->mnt_mountpoint = p->mnt.mnt_root;
1276 change_mnt_propagation(p, MS_PRIVATE);
1280 last->mnt_hash.next = unmounted.first;
1281 unmounted.first = tmp_list.first;
1282 unmounted.first->pprev = &unmounted.first;
1286 static void shrink_submounts(struct mount *mnt);
1288 static int do_umount(struct mount *mnt, int flags)
1290 struct super_block *sb = mnt->mnt.mnt_sb;
1293 retval = security_sb_umount(&mnt->mnt, flags);
1298 * Allow userspace to request a mountpoint be expired rather than
1299 * unmounting unconditionally. Unmount only happens if:
1300 * (1) the mark is already set (the mark is cleared by mntput())
1301 * (2) the usage count == 1 [parent vfsmount] + 1 [sys_umount]
1303 if (flags & MNT_EXPIRE) {
1304 if (&mnt->mnt == current->fs->root.mnt ||
1305 flags & (MNT_FORCE | MNT_DETACH))
1309 * probably don't strictly need the lock here if we examined
1310 * all race cases, but it's a slowpath.
1313 if (mnt_get_count(mnt) != 2) {
1314 unlock_mount_hash();
1317 unlock_mount_hash();
1319 if (!xchg(&mnt->mnt_expiry_mark, 1))
	/*
	 * If we may have to abort operations to get out of this
	 * mount, and they will themselves hold resources we must
	 * allow the fs to do things. In the Unix tradition of
	 * 'Gee, that's tricky, let's do it in userspace' the umount_begin
	 * might fail to complete on the first run through as other tasks
	 * must return, and the like. That's for the mount program to worry
	 * about for the moment.
	 */
1333 if (flags & MNT_FORCE && sb->s_op->umount_begin) {
1334 sb->s_op->umount_begin(sb);
1338 * No sense to grab the lock for this test, but test itself looks
1339 * somewhat bogus. Suggestions for better replacement?
1340 * Ho-hum... In principle, we might treat that as umount + switch
1341 * to rootfs. GC would eventually take care of the old vfsmount.
1342 * Actually it makes sense, especially if rootfs would contain a
1343 * /reboot - static binary that would close all descriptors and
1344 * call reboot(9). Then init(8) could umount root and exec /reboot.
1346 if (&mnt->mnt == current->fs->root.mnt && !(flags & MNT_DETACH)) {
1348 * Special case for "unmounting" root ...
1349 * we just try to remount it readonly.
1351 down_write(&sb->s_umount);
1352 if (!(sb->s_flags & MS_RDONLY))
1353 retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
1354 up_write(&sb->s_umount);
1362 if (flags & MNT_DETACH) {
1363 if (!list_empty(&mnt->mnt_list))
1364 umount_tree(mnt, 2);
1367 shrink_submounts(mnt);
1369 if (!propagate_mount_busy(mnt, 2)) {
1370 if (!list_empty(&mnt->mnt_list))
1371 umount_tree(mnt, 1);
1375 unlock_mount_hash();
1381 * Is the caller allowed to modify his namespace?
1383 static inline bool may_mount(void)
1385 return ns_capable(current->nsproxy->mnt_ns->user_ns, CAP_SYS_ADMIN);
1389 * Now umount can handle mount points as well as block devices.
1390 * This is important for filesystems which use unnamed block devices.
1392 * We now support a flag for forced unmount like the other 'big iron'
1393 * unixes. Our API is identical to OSF/1 to avoid making a mess of AMD
1396 SYSCALL_DEFINE2(umount, char __user *, name, int, flags)
1401 int lookup_flags = 0;
1403 if (flags & ~(MNT_FORCE | MNT_DETACH | MNT_EXPIRE | UMOUNT_NOFOLLOW))
1409 if (!(flags & UMOUNT_NOFOLLOW))
1410 lookup_flags |= LOOKUP_FOLLOW;
1412 retval = user_path_mountpoint_at(AT_FDCWD, name, lookup_flags, &path);
1415 mnt = real_mount(path.mnt);
1417 if (path.dentry != path.mnt->mnt_root)
1419 if (!check_mnt(mnt))
1421 if (mnt->mnt.mnt_flags & MNT_LOCKED)
1424 retval = do_umount(mnt, flags);
1426 /* we mustn't call path_put() as that would clear mnt_expiry_mark */
1428 mntput_no_expire(mnt);
1433 #ifdef __ARCH_WANT_SYS_OLDUMOUNT
1436 * The 2.0 compatible umount. No flags.
1438 SYSCALL_DEFINE1(oldumount, char __user *, name)
1440 return sys_umount(name, 0);
1445 static bool is_mnt_ns_file(struct dentry *dentry)
1447 /* Is this a proxy for a mount namespace? */
1448 struct inode *inode = dentry->d_inode;
1451 if (!proc_ns_inode(inode))
1454 ei = get_proc_ns(inode);
1455 if (ei->ns_ops != &mntns_operations)
1461 static bool mnt_ns_loop(struct dentry *dentry)
1463 /* Could bind mounting the mount namespace inode cause a
1464 * mount namespace loop?
1466 struct mnt_namespace *mnt_ns;
1467 if (!is_mnt_ns_file(dentry))
1470 mnt_ns = get_proc_ns(dentry->d_inode)->ns;
1471 return current->nsproxy->mnt_ns->seq >= mnt_ns->seq;
1474 struct mount *copy_tree(struct mount *mnt, struct dentry *dentry,
1477 struct mount *res, *p, *q, *r, *parent;
1479 if (!(flag & CL_COPY_UNBINDABLE) && IS_MNT_UNBINDABLE(mnt))
1480 return ERR_PTR(-EINVAL);
1482 if (!(flag & CL_COPY_MNT_NS_FILE) && is_mnt_ns_file(dentry))
1483 return ERR_PTR(-EINVAL);
1485 res = q = clone_mnt(mnt, dentry, flag);
1489 q->mnt.mnt_flags &= ~MNT_LOCKED;
1490 q->mnt_mountpoint = mnt->mnt_mountpoint;
1493 list_for_each_entry(r, &mnt->mnt_mounts, mnt_child) {
1495 if (!is_subdir(r->mnt_mountpoint, dentry))
1498 for (s = r; s; s = next_mnt(s, r)) {
1499 if (!(flag & CL_COPY_UNBINDABLE) &&
1500 IS_MNT_UNBINDABLE(s)) {
1501 s = skip_mnt_tree(s);
1504 if (!(flag & CL_COPY_MNT_NS_FILE) &&
1505 is_mnt_ns_file(s->mnt.mnt_root)) {
1506 s = skip_mnt_tree(s);
1509 while (p != s->mnt_parent) {
1515 q = clone_mnt(p, p->mnt.mnt_root, flag);
1519 list_add_tail(&q->mnt_list, &res->mnt_list);
1520 attach_mnt(q, parent, p->mnt_mp);
1521 unlock_mount_hash();
1528 umount_tree(res, 0);
1529 unlock_mount_hash();
1534 /* Caller should check returned pointer for errors */
1536 struct vfsmount *collect_mounts(struct path *path)
1540 tree = copy_tree(real_mount(path->mnt), path->dentry,
1541 CL_COPY_ALL | CL_PRIVATE);
1544 return ERR_CAST(tree);
1548 void drop_collected_mounts(struct vfsmount *mnt)
1552 umount_tree(real_mount(mnt), 0);
1553 unlock_mount_hash();
1557 int iterate_mounts(int (*f)(struct vfsmount *, void *), void *arg,
1558 struct vfsmount *root)
1561 int res = f(root, arg);
1564 list_for_each_entry(mnt, &real_mount(root)->mnt_list, mnt_list) {
1565 res = f(&mnt->mnt, arg);
1572 static void cleanup_group_ids(struct mount *mnt, struct mount *end)
1576 for (p = mnt; p != end; p = next_mnt(p, mnt)) {
1577 if (p->mnt_group_id && !IS_MNT_SHARED(p))
1578 mnt_release_group_id(p);
1582 static int invent_group_ids(struct mount *mnt, bool recurse)
1586 for (p = mnt; p; p = recurse ? next_mnt(p, mnt) : NULL) {
1587 if (!p->mnt_group_id && !IS_MNT_SHARED(p)) {
1588 int err = mnt_alloc_group_id(p);
1590 cleanup_group_ids(mnt, p);
1600 * @source_mnt : mount tree to be attached
1601 * @nd : place the mount tree @source_mnt is attached
1602 * @parent_nd : if non-null, detach the source_mnt from its parent and
1603 * store the parent mount and mountpoint dentry.
1604 * (done when source_mnt is moved)
 * NOTE: the table below explains the semantics when a source mount
 * of a given type is attached to a destination mount of a given type.
 * ---------------------------------------------------------------------------
 * |         BIND MOUNT OPERATION                                            |
 * |**************************************************************************
 * | source-->| shared        |       private  |       slave    | unbindable |
 * | dest     |               |                |                |            |
 * |   |      |               |                |                |            |
 * |   v      |               |                |                |            |
 * |**************************************************************************
 * |  shared  | shared (++)   |     shared (+) |     shared(+++)|  invalid   |
 * |          |               |                |                |            |
 * |non-shared| shared (+)    |      private   |      slave (*) |  invalid   |
 * ***************************************************************************
1620 * A bind operation clones the source mount and mounts the clone on the
1621 * destination mount.
1623 * (++) the cloned mount is propagated to all the mounts in the propagation
1624 * tree of the destination mount and the cloned mount is added to
1625 * the peer group of the source mount.
1626 * (+) the cloned mount is created under the destination mount and is marked
1627 * as shared. The cloned mount is added to the peer group of the source
1629 * (+++) the mount is propagated to all the mounts in the propagation tree
1630 * of the destination mount and the cloned mount is made slave
1631 * of the same master as that of the source mount. The cloned mount
1632 * is marked as 'shared and slave'.
1633 * (*) the cloned mount is made a slave of the same master as that of the
 * ---------------------------------------------------------------------------
 * |         MOVE MOUNT OPERATION                                            |
 * |**************************************************************************
 * | source-->| shared        |       private  |       slave    | unbindable |
 * | dest     |               |                |                |            |
 * |   |      |               |                |                |            |
 * |   v      |               |                |                |            |
 * |**************************************************************************
 * |  shared  | shared (+)    |     shared (+) |    shared(+++) |  invalid   |
 * |          |               |                |                |            |
 * |non-shared| shared (+*)   |      private   |    slave (*)   | unbindable |
 * ***************************************************************************
1649 * (+) the mount is moved to the destination. And is then propagated to
1650 * all the mounts in the propagation tree of the destination mount.
1651 * (+*) the mount is moved to the destination.
1652 * (+++) the mount is moved to the destination and is then propagated to
1653 * all the mounts belonging to the destination mount's propagation tree.
1654 * the mount is marked as 'shared and slave'.
1655 * (*) the mount continues to be a slave at the new location.
 * if the source mount is a tree, the operations explained above are
 * applied to each mount in the tree.
1659 * Must be called without spinlocks held, since this function can sleep
1662 static int attach_recursive_mnt(struct mount *source_mnt,
1663 struct mount *dest_mnt,
1664 struct mountpoint *dest_mp,
1665 struct path *parent_path)
1667 HLIST_HEAD(tree_list);
1668 struct mount *child, *p;
1669 struct hlist_node *n;
1672 if (IS_MNT_SHARED(dest_mnt)) {
1673 err = invent_group_ids(source_mnt, true);
1676 err = propagate_mnt(dest_mnt, dest_mp, source_mnt, &tree_list);
1679 goto out_cleanup_ids;
1680 for (p = source_mnt; p; p = next_mnt(p, source_mnt))
1686 detach_mnt(source_mnt, parent_path);
1687 attach_mnt(source_mnt, dest_mnt, dest_mp);
1688 touch_mnt_namespace(source_mnt->mnt_ns);
1690 mnt_set_mountpoint(dest_mnt, dest_mp, source_mnt);
1691 commit_tree(source_mnt, NULL);
1694 hlist_for_each_entry_safe(child, n, &tree_list, mnt_hash) {
1696 hlist_del_init(&child->mnt_hash);
1697 q = __lookup_mnt_last(&child->mnt_parent->mnt,
1698 child->mnt_mountpoint);
1699 commit_tree(child, q);
1701 unlock_mount_hash();
1706 while (!hlist_empty(&tree_list)) {
1707 child = hlist_entry(tree_list.first, struct mount, mnt_hash);
1708 umount_tree(child, 0);
1710 unlock_mount_hash();
1711 cleanup_group_ids(source_mnt, NULL);
1716 static struct mountpoint *lock_mount(struct path *path)
1718 struct vfsmount *mnt;
1719 struct dentry *dentry = path->dentry;
1721 mutex_lock(&dentry->d_inode->i_mutex);
1722 if (unlikely(cant_mount(dentry))) {
1723 mutex_unlock(&dentry->d_inode->i_mutex);
1724 return ERR_PTR(-ENOENT);
1727 mnt = lookup_mnt(path);
1729 struct mountpoint *mp = new_mountpoint(dentry);
1732 mutex_unlock(&dentry->d_inode->i_mutex);
1738 mutex_unlock(&path->dentry->d_inode->i_mutex);
1741 dentry = path->dentry = dget(mnt->mnt_root);
1745 static void unlock_mount(struct mountpoint *where)
1747 struct dentry *dentry = where->m_dentry;
1748 put_mountpoint(where);
1750 mutex_unlock(&dentry->d_inode->i_mutex);
1753 static int graft_tree(struct mount *mnt, struct mount *p, struct mountpoint *mp)
1755 if (mnt->mnt.mnt_sb->s_flags & MS_NOUSER)
1758 if (S_ISDIR(mp->m_dentry->d_inode->i_mode) !=
1759 S_ISDIR(mnt->mnt.mnt_root->d_inode->i_mode))
1762 return attach_recursive_mnt(mnt, p, mp, NULL);
1766 * Sanity check the flags to change_mnt_propagation.
1769 static int flags_to_propagation_type(int flags)
1771 int type = flags & ~(MS_REC | MS_SILENT);
1773 /* Fail if any non-propagation flags are set */
1774 if (type & ~(MS_SHARED | MS_PRIVATE | MS_SLAVE | MS_UNBINDABLE))
1776 /* Only one propagation flag should be set */
1777 if (!is_power_of_2(type))
1783 * recursively change the type of the mountpoint.
1785 static int do_change_type(struct path *path, int flag)
1788 struct mount *mnt = real_mount(path->mnt);
1789 int recurse = flag & MS_REC;
1793 if (path->dentry != path->mnt->mnt_root)
1796 type = flags_to_propagation_type(flag);
1801 if (type == MS_SHARED) {
1802 err = invent_group_ids(mnt, recurse);
1808 for (m = mnt; m; m = (recurse ? next_mnt(m, mnt) : NULL))
1809 change_mnt_propagation(m, type);
1810 unlock_mount_hash();
1817 static bool has_locked_children(struct mount *mnt, struct dentry *dentry)
1819 struct mount *child;
1820 list_for_each_entry(child, &mnt->mnt_mounts, mnt_child) {
1821 if (!is_subdir(child->mnt_mountpoint, dentry))
1824 if (child->mnt.mnt_flags & MNT_LOCKED)
1831 * do loopback mount.
1833 static int do_loopback(struct path *path, const char *old_name,
1836 struct path old_path;
1837 struct mount *mnt = NULL, *old, *parent;
1838 struct mountpoint *mp;
1840 if (!old_name || !*old_name)
1842 err = kern_path(old_name, LOOKUP_FOLLOW|LOOKUP_AUTOMOUNT, &old_path);
1847 if (mnt_ns_loop(old_path.dentry))
1850 mp = lock_mount(path);
1855 old = real_mount(old_path.mnt);
1856 parent = real_mount(path->mnt);
1859 if (IS_MNT_UNBINDABLE(old))
1862 if (!check_mnt(parent) || !check_mnt(old))
1865 if (!recurse && has_locked_children(old, old_path.dentry))
1869 mnt = copy_tree(old, old_path.dentry, CL_COPY_MNT_NS_FILE);
1871 mnt = clone_mnt(old, old_path.dentry, 0);
1878 mnt->mnt.mnt_flags &= ~MNT_LOCKED;
1880 err = graft_tree(mnt, parent, mp);
1883 umount_tree(mnt, 0);
1884 unlock_mount_hash();
1889 path_put(&old_path);
1893 static int change_mount_flags(struct vfsmount *mnt, int ms_flags)
1896 int readonly_request = 0;
1898 if (ms_flags & MS_RDONLY)
1899 readonly_request = 1;
1900 if (readonly_request == __mnt_is_readonly(mnt))
1903 if (readonly_request)
1904 error = mnt_make_readonly(real_mount(mnt));
1906 __mnt_unmake_readonly(real_mount(mnt));
1911 * change filesystem flags. dir should be a physical root of filesystem.
1912 * If you've mounted a non-root directory somewhere and want to do remount
1913 * on it - tough luck.
1915 static int do_remount(struct path *path, int flags, int mnt_flags,
1919 struct super_block *sb = path->mnt->mnt_sb;
1920 struct mount *mnt = real_mount(path->mnt);
1922 if (!check_mnt(mnt))
1925 if (path->dentry != path->mnt->mnt_root)
1928 /* Don't allow changing of locked mnt flags.
1930 * No locks need to be held here while testing the various
1931 * MNT_LOCK flags because those flags can never be cleared
1932 * once they are set.
1934 if ((mnt->mnt.mnt_flags & MNT_LOCK_READONLY) &&
1935 !(mnt_flags & MNT_READONLY)) {
1938 if ((mnt->mnt.mnt_flags & MNT_LOCK_NODEV) &&
1939 !(mnt_flags & MNT_NODEV)) {
1942 if ((mnt->mnt.mnt_flags & MNT_LOCK_NOSUID) &&
1943 !(mnt_flags & MNT_NOSUID)) {
1946 if ((mnt->mnt.mnt_flags & MNT_LOCK_NOEXEC) &&
1947 !(mnt_flags & MNT_NOEXEC)) {
1950 if ((mnt->mnt.mnt_flags & MNT_LOCK_ATIME) &&
1951 ((mnt->mnt.mnt_flags & MNT_ATIME_MASK) != (mnt_flags & MNT_ATIME_MASK))) {
1955 err = security_sb_remount(sb, data);
1959 down_write(&sb->s_umount);
1960 if (flags & MS_BIND)
1961 err = change_mount_flags(path->mnt, flags);
1962 else if (!capable(CAP_SYS_ADMIN))
1965 err = do_remount_sb(sb, flags, data, 0);
1968 mnt_flags |= mnt->mnt.mnt_flags & ~MNT_USER_SETTABLE_MASK;
1969 mnt->mnt.mnt_flags = mnt_flags;
1970 touch_mnt_namespace(mnt->mnt_ns);
1971 unlock_mount_hash();
1973 up_write(&sb->s_umount);
1977 static inline int tree_contains_unbindable(struct mount *mnt)
1980 for (p = mnt; p; p = next_mnt(p, mnt)) {
1981 if (IS_MNT_UNBINDABLE(p))
1987 static int do_move_mount(struct path *path, const char *old_name)
1989 struct path old_path, parent_path;
1992 struct mountpoint *mp;
1994 if (!old_name || !*old_name)
1996 err = kern_path(old_name, LOOKUP_FOLLOW, &old_path);
2000 mp = lock_mount(path);
2005 old = real_mount(old_path.mnt);
2006 p = real_mount(path->mnt);
2009 if (!check_mnt(p) || !check_mnt(old))
2012 if (old->mnt.mnt_flags & MNT_LOCKED)
2016 if (old_path.dentry != old_path.mnt->mnt_root)
2019 if (!mnt_has_parent(old))
2022 if (S_ISDIR(path->dentry->d_inode->i_mode) !=
2023 S_ISDIR(old_path.dentry->d_inode->i_mode))
2026 * Don't move a mount residing in a shared parent.
2028 if (IS_MNT_SHARED(old->mnt_parent))
2031 * Don't move a mount tree containing unbindable mounts to a destination
2032 * mount which is shared.
2034 if (IS_MNT_SHARED(p) && tree_contains_unbindable(old))
2037 for (; mnt_has_parent(p); p = p->mnt_parent)
2041 err = attach_recursive_mnt(old, real_mount(path->mnt), mp, &parent_path);
	/* if the mount is moved, it should no longer be expired
	 * automatically */
2047 list_del_init(&old->mnt_expire);
2052 path_put(&parent_path);
2053 path_put(&old_path);
2057 static struct vfsmount *fs_set_subtype(struct vfsmount *mnt, const char *fstype)
2060 const char *subtype = strchr(fstype, '.');
2069 mnt->mnt_sb->s_subtype = kstrdup(subtype, GFP_KERNEL);
2071 if (!mnt->mnt_sb->s_subtype)
2077 return ERR_PTR(err);
2081 * add a mount into a namespace's mount tree
2083 static int do_add_mount(struct mount *newmnt, struct path *path, int mnt_flags)
2085 struct mountpoint *mp;
2086 struct mount *parent;
2089 mnt_flags &= ~MNT_INTERNAL_FLAGS;
2091 mp = lock_mount(path);
2095 parent = real_mount(path->mnt);
2097 if (unlikely(!check_mnt(parent))) {
2098 /* that's acceptable only for automounts done in private ns */
2099 if (!(mnt_flags & MNT_SHRINKABLE))
2101 /* ... and for those we'd better have mountpoint still alive */
2102 if (!parent->mnt_ns)
2106 /* Refuse the same filesystem on the same mount point */
2108 if (path->mnt->mnt_sb == newmnt->mnt.mnt_sb &&
2109 path->mnt->mnt_root == path->dentry)
2113 if (S_ISLNK(newmnt->mnt.mnt_root->d_inode->i_mode))
2116 newmnt->mnt.mnt_flags = mnt_flags;
2117 err = graft_tree(newmnt, parent, mp);
/*
 * create a new mount for userspace and request it to be added into the
 * namespace's tree
 */
2128 static int do_new_mount(struct path *path, const char *fstype, int flags,
2129 int mnt_flags, const char *name, void *data)
2131 struct file_system_type *type;
2132 struct user_namespace *user_ns = current->nsproxy->mnt_ns->user_ns;
2133 struct vfsmount *mnt;
2139 type = get_fs_type(fstype);
2143 if (user_ns != &init_user_ns) {
2144 if (!(type->fs_flags & FS_USERNS_MOUNT)) {
2145 put_filesystem(type);
2148 /* Only in special cases allow devices from mounts
2149 * created outside the initial user namespace.
2151 if (!(type->fs_flags & FS_USERNS_DEV_MOUNT)) {
2153 mnt_flags |= MNT_NODEV | MNT_LOCK_NODEV;
2157 mnt = vfs_kern_mount(type, flags, name, data);
2158 if (!IS_ERR(mnt) && (type->fs_flags & FS_HAS_SUBTYPE) &&
2159 !mnt->mnt_sb->s_subtype)
2160 mnt = fs_set_subtype(mnt, fstype);
2162 put_filesystem(type);
2164 return PTR_ERR(mnt);
2166 err = do_add_mount(real_mount(mnt), path, mnt_flags);
2172 int finish_automount(struct vfsmount *m, struct path *path)
2174 struct mount *mnt = real_mount(m);
2176 /* The new mount record should have at least 2 refs to prevent it being
2177 * expired before we get a chance to add it
2179 BUG_ON(mnt_get_count(mnt) < 2);
2181 if (m->mnt_sb == path->mnt->mnt_sb &&
2182 m->mnt_root == path->dentry) {
2187 err = do_add_mount(mnt, path, path->mnt->mnt_flags | MNT_SHRINKABLE);
2191 /* remove m from any expiration list it may be on */
2192 if (!list_empty(&mnt->mnt_expire)) {
2194 list_del_init(&mnt->mnt_expire);
2203 * mnt_set_expiry - Put a mount on an expiration list
2204 * @mnt: The mount to list.
2205 * @expiry_list: The list to add the mount to.
2207 void mnt_set_expiry(struct vfsmount *mnt, struct list_head *expiry_list)
2211 list_add_tail(&real_mount(mnt)->mnt_expire, expiry_list);
2215 EXPORT_SYMBOL(mnt_set_expiry);
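/*
 * Illustrative sketch (not part of the original file): an automounting
 * filesystem (compare AFS/NFS referrals) parks each submount it
 * creates on a private expiry list. All example_* names are
 * hypothetical.
 */
static LIST_HEAD(example_automount_list);
static struct vfsmount *example_make_submount(struct path *path); /* hypothetical */

static struct vfsmount *example_d_automount(struct path *path)
{
	struct vfsmount *mnt = example_make_submount(path);

	if (!IS_ERR(mnt))
		mnt_set_expiry(mnt, &example_automount_list);
	return mnt;
}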
 * process a list of expirable mountpoints with the intent of discarding any
 * mountpoints that aren't in use and haven't been touched since last we came
 * here.
2222 void mark_mounts_for_expiry(struct list_head *mounts)
2224 struct mount *mnt, *next;
2225 LIST_HEAD(graveyard);
2227 if (list_empty(mounts))
2233 /* extract from the expiration list every vfsmount that matches the
2234 * following criteria:
2235 * - only referenced by its parent vfsmount
2236 * - still marked for expiry (marked on the last call here; marks are
2237 * cleared by mntput())
2239 list_for_each_entry_safe(mnt, next, mounts, mnt_expire) {
2240 if (!xchg(&mnt->mnt_expiry_mark, 1) ||
2241 propagate_mount_busy(mnt, 1))
2243 list_move(&mnt->mnt_expire, &graveyard);
2245 while (!list_empty(&graveyard)) {
2246 mnt = list_first_entry(&graveyard, struct mount, mnt_expire);
2247 touch_mnt_namespace(mnt->mnt_ns);
2248 umount_tree(mnt, 1);
2250 unlock_mount_hash();
2254 EXPORT_SYMBOL_GPL(mark_mounts_for_expiry);
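/*
 * Illustrative sketch (not part of the original file), continuing the
 * example above: reap the expiry list periodically, so a submount that
 * stays unused across two passes gets unmounted. example_* names are
 * hypothetical.
 */
static void example_expiry_work(struct work_struct *work);
static DECLARE_DELAYED_WORK(example_expiry_dwork, example_expiry_work);

static void example_expiry_work(struct work_struct *work)
{
	mark_mounts_for_expiry(&example_automount_list);
	if (!list_empty(&example_automount_list))
		schedule_delayed_work(&example_expiry_dwork, 30 * HZ);
}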
2257 * Ripoff of 'select_parent()'
2259 * search the list of submounts for a given mountpoint, and move any
2260 * shrinkable submounts to the 'graveyard' list.
2262 static int select_submounts(struct mount *parent, struct list_head *graveyard)
2264 struct mount *this_parent = parent;
2265 struct list_head *next;
2269 next = this_parent->mnt_mounts.next;
2271 while (next != &this_parent->mnt_mounts) {
2272 struct list_head *tmp = next;
2273 struct mount *mnt = list_entry(tmp, struct mount, mnt_child);
2276 if (!(mnt->mnt.mnt_flags & MNT_SHRINKABLE))
2279 * Descend a level if the d_mounts list is non-empty.
2281 if (!list_empty(&mnt->mnt_mounts)) {
2286 if (!propagate_mount_busy(mnt, 1)) {
2287 list_move_tail(&mnt->mnt_expire, graveyard);
2292 * All done at this level ... ascend and resume the search
2294 if (this_parent != parent) {
2295 next = this_parent->mnt_child.next;
2296 this_parent = this_parent->mnt_parent;
2303 * process a list of expirable mountpoints with the intent of discarding any
2304 * submounts of a specific parent mountpoint
2306 * mount_lock must be held for write
2308 static void shrink_submounts(struct mount *mnt)
2310 LIST_HEAD(graveyard);
2313 /* extract submounts of 'mountpoint' from the expiration list */
2314 while (select_submounts(mnt, &graveyard)) {
2315 while (!list_empty(&graveyard)) {
2316 m = list_first_entry(&graveyard, struct mount,
2318 touch_mnt_namespace(m->mnt_ns);
2325 * Some copy_from_user() implementations do not return the exact number of
2326 * bytes remaining to copy on a fault. But copy_mount_options() requires that.
2327 * Note that this function differs from copy_from_user() in that it will oops
2328 * on bad values of `to', rather than returning a short copy.
2330 static long exact_copy_from_user(void *to, const void __user * from,
2334 const char __user *f = from;
2337 if (!access_ok(VERIFY_READ, from, n))
2341 if (__get_user(c, f)) {
2352 int copy_mount_options(const void __user * data, unsigned long *where)
2362 if (!(page = __get_free_page(GFP_KERNEL)))
2365 /* We only care that *some* data at the address the user
2366 * gave us is valid. Just in case, we'll zero
2367 * the remainder of the page.
2369 /* copy_from_user cannot cross TASK_SIZE ! */
2370 size = TASK_SIZE - (unsigned long)data;
2371 if (size > PAGE_SIZE)
2374 i = size - exact_copy_from_user((void *)page, data, size);
2380 memset((char *)page + i, 0, PAGE_SIZE - i);
2385 int copy_mount_string(const void __user *data, char **where)
2394 tmp = strndup_user(data, PAGE_SIZE);
2396 return PTR_ERR(tmp);
2403 * Flags is a 32-bit value that allows up to 31 non-fs dependent flags to
2404 * be given to the mount() call (ie: read-only, no-dev, no-suid etc).
2406 * data is a (void *) that can point to any structure up to
2407 * PAGE_SIZE-1 bytes, which can contain arbitrary fs-dependent
2408 * information (or be NULL).
2410 * Pre-0.97 versions of mount() didn't have a flags word.
2411 * When the flags word was introduced its top half was required
2412 * to have the magic value 0xC0ED, and this remained so until 2.4.0-test9.
2413 * Therefore, if this magic number is present, it carries no information
2414 * and must be discarded.
2416 long do_mount(const char *dev_name, const char *dir_name,
2417 const char *type_page, unsigned long flags, void *data_page)
2424 if ((flags & MS_MGC_MSK) == MS_MGC_VAL)
2425 flags &= ~MS_MGC_MSK;
2427 /* Basic sanity checks */
2429 if (!dir_name || !*dir_name || !memchr(dir_name, 0, PAGE_SIZE))
2433 ((char *)data_page)[PAGE_SIZE - 1] = 0;
2435 /* ... and get the mountpoint */
2436 retval = kern_path(dir_name, LOOKUP_FOLLOW, &path);
2440 retval = security_sb_mount(dev_name, &path,
2441 type_page, flags, data_page);
2442 if (!retval && !may_mount())
	/* Default to relatime unless overridden */
2448 if (!(flags & MS_NOATIME))
2449 mnt_flags |= MNT_RELATIME;
2451 /* Separate the per-mountpoint flags */
2452 if (flags & MS_NOSUID)
2453 mnt_flags |= MNT_NOSUID;
2454 if (flags & MS_NODEV)
2455 mnt_flags |= MNT_NODEV;
2456 if (flags & MS_NOEXEC)
2457 mnt_flags |= MNT_NOEXEC;
2458 if (flags & MS_NOATIME)
2459 mnt_flags |= MNT_NOATIME;
2460 if (flags & MS_NODIRATIME)
2461 mnt_flags |= MNT_NODIRATIME;
2462 if (flags & MS_STRICTATIME)
2463 mnt_flags &= ~(MNT_RELATIME | MNT_NOATIME);
2464 if (flags & MS_RDONLY)
2465 mnt_flags |= MNT_READONLY;
2467 flags &= ~(MS_NOSUID | MS_NOEXEC | MS_NODEV | MS_ACTIVE | MS_BORN |
2468 MS_NOATIME | MS_NODIRATIME | MS_RELATIME| MS_KERNMOUNT |
2471 if (flags & MS_REMOUNT)
2472 retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
2474 else if (flags & MS_BIND)
2475 retval = do_loopback(&path, dev_name, flags & MS_REC);
2476 else if (flags & (MS_SHARED | MS_PRIVATE | MS_SLAVE | MS_UNBINDABLE))
2477 retval = do_change_type(&path, flags);
2478 else if (flags & MS_MOVE)
2479 retval = do_move_mount(&path, dev_name);
2481 retval = do_new_mount(&path, type_page, flags, mnt_flags,
2482 dev_name, data_page);
2488 static void free_mnt_ns(struct mnt_namespace *ns)
2490 proc_free_inum(ns->proc_inum);
2491 put_user_ns(ns->user_ns);
/*
 * Assign a sequence number so we can detect when we attempt to bind
 * mount a reference to an older mount namespace into the current
 * mount namespace, preventing reference counting loops. Even a 64bit
 * counter incrementing at an absurd 10GHz would take about 58 years to
 * wrap; at any realistic namespace creation rate it never will, so we
 * can ignore the possibility.
 */
2502 static atomic64_t mnt_ns_seq = ATOMIC64_INIT(1);
2504 static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns)
2506 struct mnt_namespace *new_ns;
2509 new_ns = kmalloc(sizeof(struct mnt_namespace), GFP_KERNEL);
2511 return ERR_PTR(-ENOMEM);
2512 ret = proc_alloc_inum(&new_ns->proc_inum);
2515 return ERR_PTR(ret);
2517 new_ns->seq = atomic64_add_return(1, &mnt_ns_seq);
2518 atomic_set(&new_ns->count, 1);
2519 new_ns->root = NULL;
2520 INIT_LIST_HEAD(&new_ns->list);
2521 init_waitqueue_head(&new_ns->poll);
2523 new_ns->user_ns = get_user_ns(user_ns);
2527 struct mnt_namespace *copy_mnt_ns(unsigned long flags, struct mnt_namespace *ns,
2528 struct user_namespace *user_ns, struct fs_struct *new_fs)
2530 struct mnt_namespace *new_ns;
2531 struct vfsmount *rootmnt = NULL, *pwdmnt = NULL;
2532 struct mount *p, *q;
2539 if (likely(!(flags & CLONE_NEWNS))) {
2546 new_ns = alloc_mnt_ns(user_ns);
2551 /* First pass: copy the tree topology */
2552 copy_flags = CL_COPY_UNBINDABLE | CL_EXPIRE;
2553 if (user_ns != ns->user_ns)
2554 copy_flags |= CL_SHARED_TO_SLAVE | CL_UNPRIVILEGED;
2555 new = copy_tree(old, old->mnt.mnt_root, copy_flags);
2558 free_mnt_ns(new_ns);
2559 return ERR_CAST(new);
2562 list_add_tail(&new_ns->list, &new->mnt_list);
2565 * Second pass: switch the tsk->fs->* elements and mark new vfsmounts
2566 * as belonging to new namespace. We have already acquired a private
2567 * fs_struct, so tsk->fs->lock is not needed.
2574 if (&p->mnt == new_fs->root.mnt) {
2575 new_fs->root.mnt = mntget(&q->mnt);
2578 if (&p->mnt == new_fs->pwd.mnt) {
2579 new_fs->pwd.mnt = mntget(&q->mnt);
2583 p = next_mnt(p, old);
2584 q = next_mnt(q, new);
2587 while (p->mnt.mnt_root != q->mnt.mnt_root)
2588 p = next_mnt(p, old);
2601 * create_mnt_ns - creates a private namespace and adds a root filesystem
2602 * @mnt: pointer to the new root filesystem mountpoint
2604 static struct mnt_namespace *create_mnt_ns(struct vfsmount *m)
2606 struct mnt_namespace *new_ns = alloc_mnt_ns(&init_user_ns);
2607 if (!IS_ERR(new_ns)) {
2608 struct mount *mnt = real_mount(m);
2609 mnt->mnt_ns = new_ns;
2611 list_add(&mnt->mnt_list, &new_ns->list);
2618 struct dentry *mount_subtree(struct vfsmount *mnt, const char *name)
2620 struct mnt_namespace *ns;
2621 struct super_block *s;
2625 ns = create_mnt_ns(mnt);
2627 return ERR_CAST(ns);
2629 err = vfs_path_lookup(mnt->mnt_root, mnt,
2630 name, LOOKUP_FOLLOW|LOOKUP_AUTOMOUNT, &path);
2635 return ERR_PTR(err);
2637 /* trade a vfsmount reference for active sb one */
2638 s = path.mnt->mnt_sb;
2639 atomic_inc(&s->s_active);
2641 /* lock the sucker */
2642 down_write(&s->s_umount);
2643 /* ... and return the root of (sub)tree on it */
2646 EXPORT_SYMBOL(mount_subtree);
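/*
 * Illustrative sketch (not part of the original file): NFS-style use of
 * mount_subtree() — mount an internal tree, then return the dentry of
 * a path within it, trading the vfsmount reference away as described
 * above. The device name and export path are hypothetical.
 */
static struct dentry *example_mount_export(struct file_system_type *type,
					   int flags, void *data)
{
	struct vfsmount *mnt = vfs_kern_mount(type, flags, "example-dev", data);

	if (IS_ERR(mnt))
		return ERR_CAST(mnt);
	return mount_subtree(mnt, "/export/home");	/* consumes mnt */
}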
SYSCALL_DEFINE5(mount, char __user *, dev_name, char __user *, dir_name,
		char __user *, type, unsigned long, flags, void __user *, data)
{
	int ret;
	char *kernel_type;
	struct filename *kernel_dir;
	char *kernel_dev;
	unsigned long data_page;

	ret = copy_mount_string(type, &kernel_type);
	if (ret < 0)
		goto out_type;

	kernel_dir = getname(dir_name);
	if (IS_ERR(kernel_dir)) {
		ret = PTR_ERR(kernel_dir);
		goto out_dir;
	}

	ret = copy_mount_string(dev_name, &kernel_dev);
	if (ret < 0)
		goto out_dev;

	ret = copy_mount_options(data, &data_page);
	if (ret < 0)
		goto out_data;

	ret = do_mount(kernel_dev, kernel_dir->name, kernel_type, flags,
		(void *) data_page);

	free_page(data_page);
out_data:
	kfree(kernel_dev);
out_dev:
	putname(kernel_dir);
out_dir:
	kfree(kernel_type);
out_type:
	return ret;
}
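
/*
 * Editor's sketch (not part of the original file): this syscall is what a
 * userspace mount(2) call lands in.  A bind mount, for instance, passes
 * NULL for type and data since MS_BIND ignores them (paths are examples):
 *
 *	#include <sys/mount.h>
 *
 *	if (mount("/srv/data", "/mnt", NULL, MS_BIND, NULL) == -1)
 *		perror("mount");
 */
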
/*
 * Return true if path is reachable from root
 *
 * namespace_sem or mount_lock is held
 */
bool is_path_reachable(struct mount *mnt, struct dentry *dentry,
			 const struct path *root)
{
	while (&mnt->mnt != root->mnt && mnt_has_parent(mnt)) {
		dentry = mnt->mnt_mountpoint;
		mnt = mnt->mnt_parent;
	}
	return &mnt->mnt == root->mnt && is_subdir(dentry, root->dentry);
}

int path_is_under(struct path *path1, struct path *path2)
{
	int res;
	read_seqlock_excl(&mount_lock);
	res = is_path_reachable(real_mount(path1->mnt), path1->dentry, path2);
	read_sequnlock_excl(&mount_lock);
	return res;
}
EXPORT_SYMBOL(path_is_under);
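
/*
 * Editor's note, an example of the semantics: with a filesystem mounted
 * on /mnt, path_is_under() returns 1 for path1 = /mnt/dir, path2 = /mnt,
 * since walking mnt_parent/mnt_mountpoint from path1 reaches path2's
 * vfsmount and /mnt/dir is a subdir of /mnt; it returns 0 for path1 = /,
 * path2 = /mnt.
 */
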
/*
 * pivot_root Semantics:
 * Moves the root file system of the current process to the directory put_old,
 * makes new_root as the new root file system of the current process, and sets
 * root/cwd of all processes which had them on the current root to new_root.
 *
 * Restrictions:
 * The new_root and put_old must be directories, and must not be on the
 * same file system as the current process root. The put_old must be
 * underneath new_root, i.e. adding a non-zero number of /.. to the string
 * pointed to by put_old must yield the same directory as new_root. No other
 * file system may be mounted on put_old. After all, new_root is a mountpoint.
 *
 * Also, the current root cannot be on the 'rootfs' (initial ramfs) filesystem.
 * See Documentation/filesystems/ramfs-rootfs-initramfs.txt for alternatives
 * in this situation.
 *
 * Notes:
 *  - we don't move root/cwd if they are not at the root (reason: if something
 *    cared enough to change them, it's probably wrong to force them elsewhere)
 *  - it's okay to pick a root that isn't the root of a file system, e.g.
 *    /nfs/my_root where /nfs is the mount point. It must be a mountpoint,
 *    though, so you may need to say mount --bind /nfs/my_root /nfs/my_root
 *    first.
 */
SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
		const char __user *, put_old)
{
	struct path new, old, parent_path, root_parent, root;
	struct mount *new_mnt, *root_mnt, *old_mnt;
	struct mountpoint *old_mp, *root_mp;
	int error;

	if (!may_mount())
		return -EPERM;

	error = user_path_dir(new_root, &new);
	if (error)
		goto out0;

	error = user_path_dir(put_old, &old);
	if (error)
		goto out1;

	error = security_sb_pivotroot(&old, &new);
	if (error)
		goto out2;

	get_fs_root(current->fs, &root);
	old_mp = lock_mount(&old);
	error = PTR_ERR(old_mp);
	if (IS_ERR(old_mp))
		goto out3;

	error = -EINVAL;
	new_mnt = real_mount(new.mnt);
	root_mnt = real_mount(root.mnt);
	old_mnt = real_mount(old.mnt);
	if (IS_MNT_SHARED(old_mnt) ||
		IS_MNT_SHARED(new_mnt->mnt_parent) ||
		IS_MNT_SHARED(root_mnt->mnt_parent))
		goto out4;
	if (!check_mnt(root_mnt) || !check_mnt(new_mnt))
		goto out4;
	if (new_mnt->mnt.mnt_flags & MNT_LOCKED)
		goto out4;
	error = -ENOENT;
	if (d_unlinked(new.dentry))
		goto out4;
	error = -EBUSY;
	if (new_mnt == root_mnt || old_mnt == root_mnt)
		goto out4; /* loop, on the same file system */
	error = -EINVAL;
	if (root.mnt->mnt_root != root.dentry)
		goto out4; /* not a mountpoint */
	if (!mnt_has_parent(root_mnt))
		goto out4; /* not attached */
	root_mp = root_mnt->mnt_mp;
	if (new.mnt->mnt_root != new.dentry)
		goto out4; /* not a mountpoint */
	if (!mnt_has_parent(new_mnt))
		goto out4; /* not attached */
	/* make sure we can reach put_old from new_root */
	if (!is_path_reachable(old_mnt, old.dentry, &new))
		goto out4;
	root_mp->m_count++; /* pin it so it won't go away */
	lock_mount_hash();
	detach_mnt(new_mnt, &parent_path);
	detach_mnt(root_mnt, &root_parent);
	if (root_mnt->mnt.mnt_flags & MNT_LOCKED) {
		new_mnt->mnt.mnt_flags |= MNT_LOCKED;
		root_mnt->mnt.mnt_flags &= ~MNT_LOCKED;
	}
	/* mount old root on put_old */
	attach_mnt(root_mnt, old_mnt, old_mp);
	/* mount new_root on / */
	attach_mnt(new_mnt, real_mount(root_parent.mnt), root_mp);
	touch_mnt_namespace(current->nsproxy->mnt_ns);
	unlock_mount_hash();
	chroot_fs_refs(&root, &new);
	put_mountpoint(root_mp);
	error = 0;
out4:
	unlock_mount(old_mp);
	if (!error) {
		path_put(&root_parent);
		path_put(&parent_path);
	}
out3:
	path_put(&root);
out2:
	path_put(&old);
out1:
	path_put(&new);
out0:
	return error;
}
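
/*
 * Editor's sketch (not part of the original file): the classic userspace
 * handover sequence matching the semantics documented above; there is no
 * glibc wrapper, so syscall(2) is used.  Paths are examples, and
 * /newroot/oldroot must already exist:
 *
 *	#include <sys/mount.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	mount("/newroot", "/newroot", NULL, MS_BIND, NULL);
 *	chdir("/newroot");
 *	syscall(SYS_pivot_root, ".", "./oldroot");
 *	chroot(".");
 *	chdir("/");
 *	umount2("/oldroot", MNT_DETACH);
 */
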
static void __init init_mount_tree(void)
{
	struct vfsmount *mnt;
	struct mnt_namespace *ns;
	struct path root;
	struct file_system_type *type;

	type = get_fs_type("rootfs");
	if (!type)
		panic("Can't find rootfs type");
	mnt = vfs_kern_mount(type, 0, "rootfs", NULL);
	put_filesystem(type);
	if (IS_ERR(mnt))
		panic("Can't create rootfs");

	ns = create_mnt_ns(mnt);
	if (IS_ERR(ns))
		panic("Can't allocate initial namespace");

	init_task.nsproxy->mnt_ns = ns;
	get_mnt_ns(ns);

	root.mnt = mnt;
	root.dentry = mnt->mnt_root;

	set_fs_pwd(current->fs, &root);
	set_fs_root(current->fs, &root);
}

void __init mnt_init(void)
{
	unsigned u;
	int err;

	mnt_cache = kmem_cache_create("mnt_cache", sizeof(struct mount),
			0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);

	mount_hashtable = alloc_large_system_hash("Mount-cache",
				sizeof(struct hlist_head),
				mhash_entries, 19,
				0,
				&m_hash_shift, &m_hash_mask, 0, 0);
	mountpoint_hashtable = alloc_large_system_hash("Mountpoint-cache",
				sizeof(struct hlist_head),
				mphash_entries, 16,
				0,
				&mp_hash_shift, &mp_hash_mask, 0, 0);

	if (!mount_hashtable || !mountpoint_hashtable)
		panic("Failed to allocate mount hash table\n");

	for (u = 0; u <= m_hash_mask; u++)
		INIT_HLIST_HEAD(&mount_hashtable[u]);
	for (u = 0; u <= mp_hash_mask; u++)
		INIT_HLIST_HEAD(&mountpoint_hashtable[u]);

	kernfs_init();

	err = sysfs_init();
	if (err)
		printk(KERN_WARNING "%s: sysfs_init error: %d\n",
			__func__, err);
	fs_kobj = kobject_create_and_add("fs", NULL);
	if (!fs_kobj)
		printk(KERN_WARNING "%s: kobj create error\n", __func__);
	init_rootfs();
	init_mount_tree();
}
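
/*
 * Editor's note: both table sizes computed above can be overridden from
 * the kernel command line via the mhash_entries= and mphash_entries=
 * setup hooks defined earlier in this file, e.g. booting with
 *
 *	mhash_entries=131072 mphash_entries=32768
 *
 * requests hash tables of (power-of-two rounded) roughly that many heads
 * instead of the memory-scaled defaults.
 */
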
void put_mnt_ns(struct mnt_namespace *ns)
{
	if (!atomic_dec_and_test(&ns->count))
		return;
	drop_collected_mounts(&ns->root->mnt);
	free_mnt_ns(ns);
}

struct vfsmount *kern_mount_data(struct file_system_type *type, void *data)
{
	struct vfsmount *mnt;
	mnt = vfs_kern_mount(type, MS_KERNMOUNT, type->name, data);
	if (!IS_ERR(mnt)) {
		/*
		 * it is a longterm mount, don't release mnt until
		 * we unmount before file sys is unregistered
		 */
		real_mount(mnt)->mnt_ns = MNT_NS_INTERNAL;
	}
	return mnt;
}
EXPORT_SYMBOL_GPL(kern_mount_data);

void kern_unmount(struct vfsmount *mnt)
{
	/* release long term mount so mount point can be released */
	if (!IS_ERR_OR_NULL(mnt)) {
		real_mount(mnt)->mnt_ns = NULL;
		synchronize_rcu();	/* yecchhh... */
		mntput(mnt);
	}
}
EXPORT_SYMBOL(kern_unmount);
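
/*
 * Editor's sketch (not part of the original file): the usual pairing of
 * the two helpers above for a filesystem keeping a long-term internal
 * mount; kern_mount(type) is the data == NULL convenience form of
 * kern_mount_data().  The names example_mnt and example_fs_type are
 * hypothetical:
 *
 *	static struct vfsmount *example_mnt;
 *
 *	example_mnt = kern_mount(&example_fs_type);
 *	if (IS_ERR(example_mnt))
 *		return PTR_ERR(example_mnt);
 *	...
 *	kern_unmount(example_mnt);	// before unregister_filesystem()
 */
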
bool our_mnt(struct vfsmount *mnt)
{
	return check_mnt(real_mount(mnt));
}

bool current_chrooted(void)
{
	/* Does the current process have a non-standard root */
	struct path ns_root;
	struct path fs_root;
	bool chrooted;

	/* Find the namespace root */
	ns_root.mnt = &current->nsproxy->mnt_ns->root->mnt;
	ns_root.dentry = ns_root.mnt->mnt_root;
	path_get(&ns_root);
	while (d_mountpoint(ns_root.dentry) && follow_down_one(&ns_root))
		;

	get_fs_root(current->fs, &fs_root);

	chrooted = !path_equal(&fs_root, &ns_root);

	path_put(&fs_root);
	path_put(&ns_root);

	return chrooted;
}

bool fs_fully_visible(struct file_system_type *type)
{
	struct mnt_namespace *ns = current->nsproxy->mnt_ns;
	struct mount *mnt;
	bool visible = false;

	if (unlikely(!ns))
		return false;

	down_read(&namespace_sem);
	list_for_each_entry(mnt, &ns->list, mnt_list) {
		struct mount *child;
		if (mnt->mnt.mnt_sb->s_type != type)
			continue;

		/* This mount is not fully visible if there are any child mounts
		 * that cover anything except for empty directories.
		 */
		list_for_each_entry(child, &mnt->mnt_mounts, mnt_child) {
			struct inode *inode = child->mnt_mountpoint->d_inode;
			if (!S_ISDIR(inode->i_mode))
				goto next;
			if (inode->i_nlink > 2)
				goto next;
		}
		visible = true;
		goto found;
	next:	;
	}
found:
	up_read(&namespace_sem);
	return visible;
}

static void *mntns_get(struct task_struct *task)
{
	struct mnt_namespace *ns = NULL;
	struct nsproxy *nsproxy;

	rcu_read_lock();
	nsproxy = task_nsproxy(task);
	if (nsproxy) {
		ns = nsproxy->mnt_ns;
		get_mnt_ns(ns);
	}
	rcu_read_unlock();

	return ns;
}

static void mntns_put(void *ns)
{
	put_mnt_ns(ns);
}

static int mntns_install(struct nsproxy *nsproxy, void *ns)
{
	struct fs_struct *fs = current->fs;
	struct mnt_namespace *mnt_ns = ns;
	struct path root;

	if (!ns_capable(mnt_ns->user_ns, CAP_SYS_ADMIN) ||
	    !ns_capable(current_user_ns(), CAP_SYS_CHROOT) ||
	    !ns_capable(current_user_ns(), CAP_SYS_ADMIN))
		return -EPERM;

	if (fs->users != 1)
		return -EINVAL;

	get_mnt_ns(mnt_ns);
	put_mnt_ns(nsproxy->mnt_ns);
	nsproxy->mnt_ns = mnt_ns;

	/* Find the root */
	root.mnt = &mnt_ns->root->mnt;
	root.dentry = mnt_ns->root->mnt.mnt_root;
	path_get(&root);
	while (d_mountpoint(root.dentry) && follow_down_one(&root))
		;

	/* Update the pwd and root */
	set_fs_pwd(fs, &root);
	set_fs_root(fs, &root);

	path_put(&root);
	return 0;
}
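
/*
 * Editor's note: mntns_install() runs when a process calls setns(2) on a
 * mount-namespace fd, e.g. from userspace (pid 1234 is an example):
 *
 *	int fd = open("/proc/1234/ns/mnt", O_RDONLY);
 *
 *	if (fd >= 0 && setns(fd, CLONE_NEWNS) == 0) {
 *		// now in the target mount namespace; root and cwd were
 *		// reset to the namespace root by the code above
 *	}
 *
 * The checks above are why the caller needs CAP_SYS_ADMIN over the target
 * namespace's owning user namespace plus CAP_SYS_CHROOT and CAP_SYS_ADMIN
 * in its own, and why a shared fs_struct (fs->users != 1) is rejected.
 */
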
static unsigned int mntns_inum(void *ns)
{
	struct mnt_namespace *mnt_ns = ns;
	return mnt_ns->proc_inum;
}

const struct proc_ns_operations mntns_operations = {
	.name		= "mnt",
	.type		= CLONE_NEWNS,
	.get		= mntns_get,
	.put		= mntns_put,
	.install	= mntns_install,
	.inum		= mntns_inum,
};