/*
 * (C) Copyright Al Viro 2000, 2001
 *	Released under GPL v2.
 *
 * Based on code from fs/super.c, copyright Linus Torvalds and others.
 */

#include <linux/config.h>
#include <linux/syscalls.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/smp_lock.h>
#include <linux/init.h>
#include <linux/quotaops.h>
#include <linux/acct.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/namespace.h>
#include <linux/namei.h>
#include <linux/security.h>
#include <linux/mount.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>

extern int __init init_rootfs(void);

#ifdef CONFIG_SYSFS
extern int __init sysfs_init(void);
#else
static inline int sysfs_init(void)
{
	return 0;
}
#endif

/* spinlock for vfsmount related operations, in place of dcache_lock */
__cacheline_aligned_in_smp DEFINE_SPINLOCK(vfsmount_lock);

static int event;

static struct list_head *mount_hashtable;
static int hash_mask __read_mostly, hash_bits __read_mostly;
static kmem_cache_t *mnt_cache;

static inline unsigned long hash(struct vfsmount *mnt, struct dentry *dentry)
{
	unsigned long tmp = ((unsigned long)mnt / L1_CACHE_BYTES);
	tmp += ((unsigned long)dentry / L1_CACHE_BYTES);
	tmp = tmp + (tmp >> hash_bits);
	return tmp & hash_mask;
}

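/*
 * Worked example (illustrative, not in the original): with
 * L1_CACHE_BYTES == 32, hash_bits == 8 and hash_mask == 255, the pair
 * (mnt == 0x1000, dentry == 0x2040) hashes as
 *
 *	tmp = 0x1000/32 + 0x2040/32 = 0x80 + 0x102 = 0x182
 *	tmp = 0x182 + (0x182 >> 8)  = 0x183
 *	tmp & 0xff                  = 0x83
 *
 * The fold before the mask mixes high bits into the low ones, so chains
 * stay spread even when many pointers share their low bits.
 */
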
struct vfsmount *alloc_vfsmnt(const char *name)
{
	struct vfsmount *mnt = kmem_cache_alloc(mnt_cache, GFP_KERNEL);
	if (mnt) {
		memset(mnt, 0, sizeof(struct vfsmount));
		atomic_set(&mnt->mnt_count, 1);
		INIT_LIST_HEAD(&mnt->mnt_hash);
		INIT_LIST_HEAD(&mnt->mnt_child);
		INIT_LIST_HEAD(&mnt->mnt_mounts);
		INIT_LIST_HEAD(&mnt->mnt_list);
		INIT_LIST_HEAD(&mnt->mnt_expire);
		if (name) {
			int size = strlen(name) + 1;
			char *newname = kmalloc(size, GFP_KERNEL);
			if (newname) {
				memcpy(newname, name, size);
				mnt->mnt_devname = newname;
			}
		}
	}
	return mnt;
}

void free_vfsmnt(struct vfsmount *mnt)
{
	kfree(mnt->mnt_devname);
	kmem_cache_free(mnt_cache, mnt);
}

/*
 * Now, lookup_mnt increments the ref count before returning
 * the vfsmount struct.
 */
struct vfsmount *lookup_mnt(struct vfsmount *mnt, struct dentry *dentry)
{
	struct list_head *head = mount_hashtable + hash(mnt, dentry);
	struct list_head *tmp = head;
	struct vfsmount *p, *found = NULL;

	spin_lock(&vfsmount_lock);
	for (;;) {
		tmp = tmp->next;
		p = NULL;
		if (tmp == head)
			break;
		p = list_entry(tmp, struct vfsmount, mnt_hash);
		if (p->mnt_parent == mnt && p->mnt_mountpoint == dentry) {
			found = mntget(p);
			break;
		}
	}
	spin_unlock(&vfsmount_lock);
	return found;
}

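/*
 * Usage sketch (illustrative, not from the original file): a caller that
 * wants to step from a mountpoint onto whatever is mounted on top of it
 * can combine lookup_mnt() with the usual reference-counting rules.  The
 * helper below is hypothetical; follow_down() in fs/namei.c is the real
 * in-tree equivalent.
 */
#if 0
static int example_step_down(struct vfsmount **mnt, struct dentry **dentry)
{
	struct vfsmount *mounted = lookup_mnt(*mnt, *dentry);
	if (!mounted)
		return 0;
	/* lookup_mnt() returned a reference; trade our old ones for it */
	dput(*dentry);
	mntput(*mnt);
	*mnt = mounted;
	*dentry = dget(mounted->mnt_root);
	return 1;
}
#endif
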
static inline int check_mnt(struct vfsmount *mnt)
{
	return mnt->mnt_namespace == current->namespace;
}

static void touch_namespace(struct namespace *ns)
{
	ns->event = ++event;
	wake_up_interruptible(&ns->poll);
}

static void __touch_namespace(struct namespace *ns)
{
	if (ns && ns->event != event) {
		ns->event = event;
		wake_up_interruptible(&ns->poll);
	}
}

static void detach_mnt(struct vfsmount *mnt, struct nameidata *old_nd)
{
	old_nd->dentry = mnt->mnt_mountpoint;
	old_nd->mnt = mnt->mnt_parent;
	mnt->mnt_parent = mnt;
	mnt->mnt_mountpoint = mnt->mnt_root;
	list_del_init(&mnt->mnt_child);
	list_del_init(&mnt->mnt_hash);
	old_nd->dentry->d_mounted--;
}

static void attach_mnt(struct vfsmount *mnt, struct nameidata *nd)
{
	mnt->mnt_parent = mntget(nd->mnt);
	mnt->mnt_mountpoint = dget(nd->dentry);
	list_add(&mnt->mnt_hash, mount_hashtable + hash(nd->mnt, nd->dentry));
	list_add_tail(&mnt->mnt_child, &nd->mnt->mnt_mounts);
	nd->dentry->d_mounted++;
}

static struct vfsmount *next_mnt(struct vfsmount *p, struct vfsmount *root)
{
	struct list_head *next = p->mnt_mounts.next;
	if (next == &p->mnt_mounts) {
		while (1) {
			if (p == root)
				return NULL;
			next = p->mnt_child.next;
			if (next != &p->mnt_parent->mnt_mounts)
				break;
			p = p->mnt_parent;
		}
	}
	return list_entry(next, struct vfsmount, mnt_child);
}

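/*
 * Illustrative note (not in the original): next_mnt() yields a
 * depth-first, pre-order walk of the subtree rooted at @root.  For a
 * tree  root -> {a, b},  a -> {a1},  successive calls starting from
 * next_mnt(root, root) return a, a1, b and then NULL, which is why
 * callers below iterate with
 *
 *	for (p = mnt; p; p = next_mnt(p, mnt))
 */
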
static struct vfsmount *clone_mnt(struct vfsmount *old, struct dentry *root)
{
	struct super_block *sb = old->mnt_sb;
	struct vfsmount *mnt = alloc_vfsmnt(old->mnt_devname);

	if (mnt) {
		mnt->mnt_flags = old->mnt_flags;
		atomic_inc(&sb->s_active);
		mnt->mnt_sb = sb;
		mnt->mnt_root = dget(root);
		mnt->mnt_mountpoint = mnt->mnt_root;
		mnt->mnt_parent = mnt;
		mnt->mnt_namespace = current->namespace;

		/* stick the duplicate mount on the same expiry list
		 * as the original if that was on one */
		spin_lock(&vfsmount_lock);
		if (!list_empty(&old->mnt_expire))
			list_add(&mnt->mnt_expire, &old->mnt_expire);
		spin_unlock(&vfsmount_lock);
	}
	return mnt;
}

static inline void __mntput(struct vfsmount *mnt)
{
	struct super_block *sb = mnt->mnt_sb;
	dput(mnt->mnt_root);
	free_vfsmnt(mnt);
	deactivate_super(sb);
}

void mntput_no_expire(struct vfsmount *mnt)
{
repeat:
	if (atomic_dec_and_lock(&mnt->mnt_count, &vfsmount_lock)) {
		if (likely(!mnt->mnt_pinned)) {
			spin_unlock(&vfsmount_lock);
			__mntput(mnt);
			return;
		}
		atomic_add(mnt->mnt_pinned + 1, &mnt->mnt_count);
		mnt->mnt_pinned = 0;
		spin_unlock(&vfsmount_lock);
		acct_auto_close_mnt(mnt);
		security_sb_umount_close(mnt);
		goto repeat;
	}
}

EXPORT_SYMBOL(mntput_no_expire);

void mnt_pin(struct vfsmount *mnt)
{
	spin_lock(&vfsmount_lock);
	mnt->mnt_pinned++;
	spin_unlock(&vfsmount_lock);
}

EXPORT_SYMBOL(mnt_pin);

void mnt_unpin(struct vfsmount *mnt)
{
	spin_lock(&vfsmount_lock);
	if (mnt->mnt_pinned) {
		atomic_inc(&mnt->mnt_count);
		mnt->mnt_pinned--;
	}
	spin_unlock(&vfsmount_lock);
}

EXPORT_SYMBOL(mnt_unpin);

/* iterator */
static void *m_start(struct seq_file *m, loff_t *pos)
{
	struct namespace *n = m->private;
	struct list_head *p;
	loff_t l = *pos;

	down_read(&n->sem);
	list_for_each(p, &n->list)
		if (!l--)
			return list_entry(p, struct vfsmount, mnt_list);
	return NULL;
}

static void *m_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct namespace *n = m->private;
	struct list_head *p = ((struct vfsmount *)v)->mnt_list.next;
	(*pos)++;
	return p == &n->list ? NULL : list_entry(p, struct vfsmount, mnt_list);
}

static void m_stop(struct seq_file *m, void *v)
{
	struct namespace *n = m->private;
	up_read(&n->sem);
}

static inline void mangle(struct seq_file *m, const char *s)
{
	seq_escape(m, s, " \t\n\\");
}

static int show_vfsmnt(struct seq_file *m, void *v)
{
	struct vfsmount *mnt = v;
	int err = 0;
	static struct proc_fs_info {
		int flag;
		char *str;
	} fs_info[] = {
		{ MS_SYNCHRONOUS, ",sync" },
		{ MS_DIRSYNC, ",dirsync" },
		{ MS_MANDLOCK, ",mand" },
		{ MS_NOATIME, ",noatime" },
		{ MS_NODIRATIME, ",nodiratime" },
		{ 0, NULL }
	};
	static struct proc_fs_info mnt_info[] = {
		{ MNT_NOSUID, ",nosuid" },
		{ MNT_NODEV, ",nodev" },
		{ MNT_NOEXEC, ",noexec" },
		{ 0, NULL }
	};
	struct proc_fs_info *fs_infop;

	mangle(m, mnt->mnt_devname ? mnt->mnt_devname : "none");
	seq_putc(m, ' ');
	seq_path(m, mnt, mnt->mnt_root, " \t\n\\");
	seq_putc(m, ' ');
	mangle(m, mnt->mnt_sb->s_type->name);
	seq_puts(m, mnt->mnt_sb->s_flags & MS_RDONLY ? " ro" : " rw");
	for (fs_infop = fs_info; fs_infop->flag; fs_infop++) {
		if (mnt->mnt_sb->s_flags & fs_infop->flag)
			seq_puts(m, fs_infop->str);
	}
	for (fs_infop = mnt_info; fs_infop->flag; fs_infop++) {
		if (mnt->mnt_flags & fs_infop->flag)
			seq_puts(m, fs_infop->str);
	}
	if (mnt->mnt_sb->s_op->show_options)
		err = mnt->mnt_sb->s_op->show_options(m, mnt);
	seq_puts(m, " 0 0\n");
	return err;
}

struct seq_operations mounts_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_vfsmnt
};

/**
 * may_umount_tree - check if a mount tree is busy
 * @mnt: root of mount tree
 *
 * This is called to check if a tree of mounts has any
 * open files, pwds, chroots or sub mounts that are
 * busy.
 */
int may_umount_tree(struct vfsmount *mnt)
{
	struct vfsmount *p;
	int actual_refs = 0;
	int minimum_refs = 0;

	spin_lock(&vfsmount_lock);
	for (p = mnt; p; p = next_mnt(p, mnt)) {
		actual_refs += atomic_read(&p->mnt_count);
		minimum_refs += 2;
	}
	spin_unlock(&vfsmount_lock);

	if (actual_refs > minimum_refs)
		return 0;

	return 1;
}

EXPORT_SYMBOL(may_umount_tree);

/**
 * may_umount - check if a mount point is busy
 * @mnt: root of mount
 *
 * This is called to check if a mount point has any
 * open files, pwds, chroots or sub mounts. If the
 * mount has sub mounts this will return busy
 * regardless of whether the sub mounts are busy.
 *
 * Doesn't take quota and stuff into account. IOW, in some cases it will
 * give false negatives. The main reason why it's here is that we need
 * a non-destructive way to look for easily umountable filesystems.
 */
int may_umount(struct vfsmount *mnt)
{
	if (atomic_read(&mnt->mnt_count) > 2)
		return 0;
	return 1;
}

EXPORT_SYMBOL(may_umount);

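/*
 * Illustrative sketch (not part of the original file): an automounting
 * filesystem could use may_umount() as a cheap, non-destructive probe
 * before queueing a mountpoint for expiry.  example_try_expire() and its
 * list parameter are hypothetical.
 */
#if 0
static void example_try_expire(struct vfsmount *mnt,
			       struct list_head *expiry_list)
{
	if (!may_umount(mnt))
		return;		/* busy: open files, pwds or submounts */

	spin_lock(&vfsmount_lock);
	if (list_empty(&mnt->mnt_expire))
		list_add_tail(&mnt->mnt_expire, expiry_list);
	spin_unlock(&vfsmount_lock);
}
#endif
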
static void umount_tree(struct vfsmount *mnt)
{
	struct vfsmount *p;
	LIST_HEAD(kill);

	for (p = mnt; p; p = next_mnt(p, mnt)) {
		list_del(&p->mnt_list);
		list_add(&p->mnt_list, &kill);
		__touch_namespace(p->mnt_namespace);
		p->mnt_namespace = NULL;
	}

	while (!list_empty(&kill)) {
		mnt = list_entry(kill.next, struct vfsmount, mnt_list);
		list_del_init(&mnt->mnt_list);
		list_del_init(&mnt->mnt_expire);
		if (mnt->mnt_parent == mnt) {
			spin_unlock(&vfsmount_lock);
		} else {
			struct nameidata old_nd;
			detach_mnt(mnt, &old_nd);
			spin_unlock(&vfsmount_lock);
			path_release(&old_nd);
		}
		mntput(mnt);
		spin_lock(&vfsmount_lock);
	}
}

static int do_umount(struct vfsmount *mnt, int flags)
{
	struct super_block *sb = mnt->mnt_sb;
	int retval;

	retval = security_sb_umount(mnt, flags);
	if (retval)
		return retval;

	/*
	 * Allow userspace to request a mountpoint be expired rather than
	 * unmounting unconditionally. Unmount only happens if:
	 *  (1) the mark is already set (the mark is cleared by mntput())
	 *  (2) the usage count == 1 [parent vfsmount] + 1 [sys_umount]
	 */
	if (flags & MNT_EXPIRE) {
		if (mnt == current->fs->rootmnt ||
		    flags & (MNT_FORCE | MNT_DETACH))
			return -EINVAL;

		if (atomic_read(&mnt->mnt_count) != 2)
			return -EBUSY;

		if (!xchg(&mnt->mnt_expiry_mark, 1))
			return -EAGAIN;
	}

	/*
	 * If we may have to abort operations to get out of this
	 * mount, and they will themselves hold resources we must
	 * allow the fs to do things. In the Unix tradition of
	 * 'Gee thats tricky lets do it in userspace' the umount_begin
	 * might fail to complete on the first run through as other tasks
	 * must return, and the like. That's for the mount program to worry
	 * about for the moment.
	 */

	lock_kernel();
	if ((flags & MNT_FORCE) && sb->s_op->umount_begin)
		sb->s_op->umount_begin(sb);
	unlock_kernel();

	/*
	 * No sense to grab the lock for this test, but test itself looks
	 * somewhat bogus. Suggestions for better replacement?
	 * Ho-hum... In principle, we might treat that as umount + switch
	 * to rootfs. GC would eventually take care of the old vfsmount.
	 * Actually it makes sense, especially if rootfs would contain a
	 * /reboot - static binary that would close all descriptors and
	 * call reboot(2). Then init(8) could umount root and exec /reboot.
	 */
	if (mnt == current->fs->rootmnt && !(flags & MNT_DETACH)) {
		/*
		 * Special case for "unmounting" root ...
		 * we just try to remount it readonly.
		 */
		down_write(&sb->s_umount);
		if (!(sb->s_flags & MS_RDONLY)) {
			lock_kernel();
			DQUOT_OFF(sb);
			retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
			unlock_kernel();
		}
		up_write(&sb->s_umount);
		return retval;
	}

	down_write(&current->namespace->sem);
	spin_lock(&vfsmount_lock);
	event++;

	retval = -EBUSY;
	if (atomic_read(&mnt->mnt_count) == 2 || flags & MNT_DETACH) {
		if (!list_empty(&mnt->mnt_list))
			umount_tree(mnt);
		retval = 0;
	}
	spin_unlock(&vfsmount_lock);
	if (retval)
		security_sb_umount_busy(mnt);
	up_write(&current->namespace->sem);
	return retval;
}

/*
 * Now umount can handle mount points as well as block devices.
 * This is important for filesystems which use unnamed block devices.
 *
 * We now support a flag for forced unmount like the other 'big iron'
 * unixes. Our API is identical to OSF/1 to avoid making a mess of AMD
 */
asmlinkage long sys_umount(char __user * name, int flags)
{
	struct nameidata nd;
	int retval;

	retval = __user_walk(name, LOOKUP_FOLLOW, &nd);
	if (retval)
		goto out;
	retval = -EINVAL;
	if (nd.dentry != nd.mnt->mnt_root)
		goto dput_and_out;
	if (!check_mnt(nd.mnt))
		goto dput_and_out;

	retval = -EPERM;
	if (!capable(CAP_SYS_ADMIN))
		goto dput_and_out;

	retval = do_umount(nd.mnt, flags);
dput_and_out:
	path_release_on_umount(&nd);
out:
	return retval;
}

#ifdef __ARCH_WANT_SYS_OLDUMOUNT

/*
 *	The 2.0 compatible umount. No flags.
 */
asmlinkage long sys_oldumount(char __user * name)
{
	return sys_umount(name, 0);
}

#endif

static int mount_is_safe(struct nameidata *nd)
{
	if (capable(CAP_SYS_ADMIN))
		return 0;
	return -EPERM;
#ifdef notyet
	if (S_ISLNK(nd->dentry->d_inode->i_mode))
		return -EPERM;
	if (nd->dentry->d_inode->i_mode & S_ISVTX) {
		if (current->uid != nd->dentry->d_inode->i_uid)
			return -EPERM;
	}
	if (permission(nd->dentry->d_inode, MAY_WRITE, nd))
		return -EPERM;
	return 0;
#endif
}

static int lives_below_in_same_fs(struct dentry *d, struct dentry *dentry)
{
	while (1) {
		if (d == dentry)
			return 1;
		if (d == NULL || d == d->d_parent)
			return 0;
		d = d->d_parent;
	}
}

static struct vfsmount *copy_tree(struct vfsmount *mnt, struct dentry *dentry)
{
	struct vfsmount *res, *p, *q, *r, *s;
	struct nameidata nd;

	res = q = clone_mnt(mnt, dentry);
	if (!q)
		goto Enomem;
	q->mnt_mountpoint = mnt->mnt_mountpoint;

	p = mnt;
	list_for_each_entry(r, &mnt->mnt_mounts, mnt_child) {
		if (!lives_below_in_same_fs(r->mnt_mountpoint, dentry))
			continue;

		for (s = r; s; s = next_mnt(s, r)) {
			while (p != s->mnt_parent) {
				p = p->mnt_parent;
				q = q->mnt_parent;
			}
			p = s;
			nd.mnt = q;
			nd.dentry = p->mnt_mountpoint;
			q = clone_mnt(p, p->mnt_root);
			if (!q)
				goto Enomem;
			spin_lock(&vfsmount_lock);
			list_add_tail(&q->mnt_list, &res->mnt_list);
			attach_mnt(q, &nd);
			spin_unlock(&vfsmount_lock);
		}
	}
	return res;
Enomem:
	if (res) {
		spin_lock(&vfsmount_lock);
		umount_tree(res);
		spin_unlock(&vfsmount_lock);
	}
	return NULL;
}

static int graft_tree(struct vfsmount *mnt, struct nameidata *nd)
{
	int err;
	if (mnt->mnt_sb->s_flags & MS_NOUSER)
		return -EINVAL;

	if (S_ISDIR(nd->dentry->d_inode->i_mode) !=
	    S_ISDIR(mnt->mnt_root->d_inode->i_mode))
		return -ENOTDIR;

	err = -ENOENT;
	down(&nd->dentry->d_inode->i_sem);
	if (IS_DEADDIR(nd->dentry->d_inode))
		goto out_unlock;

	err = security_sb_check_sb(mnt, nd);
	if (err)
		goto out_unlock;

	err = -ENOENT;
	spin_lock(&vfsmount_lock);
	if (IS_ROOT(nd->dentry) || !d_unhashed(nd->dentry)) {
		struct list_head head;

		attach_mnt(mnt, nd);
		list_add_tail(&head, &mnt->mnt_list);
		list_splice(&head, current->namespace->list.prev);
		mntget(mnt);
		err = 0;
		touch_namespace(current->namespace);
	}
	spin_unlock(&vfsmount_lock);
out_unlock:
	up(&nd->dentry->d_inode->i_sem);
	if (!err)
		security_sb_post_addmount(mnt, nd);
	return err;
}

/*
 * do loopback mount.
 */
static int do_loopback(struct nameidata *nd, char *old_name, int recurse)
{
	struct nameidata old_nd;
	struct vfsmount *mnt = NULL;
	int err = mount_is_safe(nd);
	if (err)
		return err;
	if (!old_name || !*old_name)
		return -EINVAL;
	err = path_lookup(old_name, LOOKUP_FOLLOW, &old_nd);
	if (err)
		return err;

	down_write(&current->namespace->sem);
	err = -EINVAL;
	if (!check_mnt(nd->mnt) || !check_mnt(old_nd.mnt))
		goto out;

	err = -ENOMEM;
	if (recurse)
		mnt = copy_tree(old_nd.mnt, old_nd.dentry);
	else
		mnt = clone_mnt(old_nd.mnt, old_nd.dentry);

	if (mnt) {
		/* stop bind mounts from expiring */
		spin_lock(&vfsmount_lock);
		list_del_init(&mnt->mnt_expire);
		spin_unlock(&vfsmount_lock);

		err = graft_tree(mnt, nd);
		if (err) {
			spin_lock(&vfsmount_lock);
			umount_tree(mnt);
			spin_unlock(&vfsmount_lock);
		} else
			mntput(mnt);
	}

out:
	up_write(&current->namespace->sem);
	path_release(&old_nd);
	return err;
}

/*
 * change filesystem flags. dir should be a physical root of filesystem.
 * If you've mounted a non-root directory somewhere and want to do remount
 * on it - tough luck.
 */
static int do_remount(struct nameidata *nd, int flags, int mnt_flags,
		      void *data)
{
	int err;
	struct super_block *sb = nd->mnt->mnt_sb;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (!check_mnt(nd->mnt))
		return -EINVAL;

	if (nd->dentry != nd->mnt->mnt_root)
		return -EINVAL;

	down_write(&sb->s_umount);
	err = do_remount_sb(sb, flags, data, 0);
	if (!err)
		nd->mnt->mnt_flags = mnt_flags;
	up_write(&sb->s_umount);
	if (!err)
		security_sb_post_remount(nd->mnt, flags, data);
	return err;
}

static int do_move_mount(struct nameidata *nd, char *old_name)
{
	struct nameidata old_nd, parent_nd;
	struct vfsmount *p;
	int err = 0;
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (!old_name || !*old_name)
		return -EINVAL;
	err = path_lookup(old_name, LOOKUP_FOLLOW, &old_nd);
	if (err)
		return err;

	down_write(&current->namespace->sem);
	while (d_mountpoint(nd->dentry) && follow_down(&nd->mnt, &nd->dentry))
		;
	err = -EINVAL;
	if (!check_mnt(nd->mnt) || !check_mnt(old_nd.mnt))
		goto out;

	err = -ENOENT;
	down(&nd->dentry->d_inode->i_sem);
	if (IS_DEADDIR(nd->dentry->d_inode))
		goto out1;

	spin_lock(&vfsmount_lock);
	if (!IS_ROOT(nd->dentry) && d_unhashed(nd->dentry))
		goto out2;

	err = -EINVAL;
	if (old_nd.dentry != old_nd.mnt->mnt_root)
		goto out2;

	if (old_nd.mnt == old_nd.mnt->mnt_parent)
		goto out2;

	if (S_ISDIR(nd->dentry->d_inode->i_mode) !=
	    S_ISDIR(old_nd.dentry->d_inode->i_mode))
		goto out2;

	err = -ELOOP;
	for (p = nd->mnt; p->mnt_parent != p; p = p->mnt_parent)
		if (p == old_nd.mnt)
			goto out2;
	err = 0;

	detach_mnt(old_nd.mnt, &parent_nd);
	attach_mnt(old_nd.mnt, nd);
	touch_namespace(current->namespace);

	/* if the mount is moved, it should no longer be expired
	 * automatically */
	list_del_init(&old_nd.mnt->mnt_expire);
out2:
	spin_unlock(&vfsmount_lock);
out1:
	up(&nd->dentry->d_inode->i_sem);
out:
	up_write(&current->namespace->sem);
	if (!err)
		path_release(&parent_nd);
	path_release(&old_nd);
	return err;
}

/*
 * create a new mount for userspace and request it to be added into the
 * namespace's tree
 */
static int do_new_mount(struct nameidata *nd, char *type, int flags,
			int mnt_flags, char *name, void *data)
{
	struct vfsmount *mnt;

	if (!type || !memchr(type, 0, PAGE_SIZE))
		return -EINVAL;

	/* we need capabilities... */
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	mnt = do_kern_mount(type, flags, name, data);
	if (IS_ERR(mnt))
		return PTR_ERR(mnt);

	return do_add_mount(mnt, nd, mnt_flags, NULL);
}

/*
 * add a mount into a namespace's mount tree
 * - provide the option of adding the new mount to an expiration list
 */
int do_add_mount(struct vfsmount *newmnt, struct nameidata *nd,
		 int mnt_flags, struct list_head *fslist)
{
	int err;

	down_write(&current->namespace->sem);
	/* Something was mounted here while we slept */
	while (d_mountpoint(nd->dentry) && follow_down(&nd->mnt, &nd->dentry))
		;
	err = -EINVAL;
	if (!check_mnt(nd->mnt))
		goto unlock;

	/* Refuse the same filesystem on the same mount point */
	err = -EBUSY;
	if (nd->mnt->mnt_sb == newmnt->mnt_sb &&
	    nd->mnt->mnt_root == nd->dentry)
		goto unlock;

	err = -EINVAL;
	if (S_ISLNK(newmnt->mnt_root->d_inode->i_mode))
		goto unlock;

	newmnt->mnt_flags = mnt_flags;
	newmnt->mnt_namespace = current->namespace;
	err = graft_tree(newmnt, nd);

	if (err == 0 && fslist) {
		/* add to the specified expiration list */
		spin_lock(&vfsmount_lock);
		list_add_tail(&newmnt->mnt_expire, fslist);
		spin_unlock(&vfsmount_lock);
	}

unlock:
	up_write(&current->namespace->sem);
	mntput(newmnt);
	return err;
}

EXPORT_SYMBOL_GPL(do_add_mount);

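/*
 * Illustrative sketch (not part of the original file): a network
 * filesystem crossing an automount point might build the child mount
 * itself and hand it to do_add_mount() together with a private
 * expiration list.  All example_* names and "examplefs" are
 * hypothetical; do_add_mount() consumes the reference on newmnt either
 * way, so no extra mntput() is needed here.
 */
#if 0
static LIST_HEAD(example_automount_list);

static int example_cross_automount(struct nameidata *nd)
{
	struct vfsmount *newmnt;

	newmnt = do_kern_mount("examplefs", 0, "example-server:/vol", NULL);
	if (IS_ERR(newmnt))
		return PTR_ERR(newmnt);

	/* graft at nd and put the mount on our expiry list */
	return do_add_mount(newmnt, nd, MNT_NOSUID, &example_automount_list);
}
#endif
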
static void expire_mount(struct vfsmount *mnt, struct list_head *mounts)
{
	spin_lock(&vfsmount_lock);

	/*
	 * Check if mount is still attached, if not, let whoever holds it deal
	 * with the sucker
	 */
	if (mnt->mnt_parent == mnt) {
		spin_unlock(&vfsmount_lock);
		return;
	}

	/*
	 * Check that it is still dead: the count should now be 2 - as
	 * contributed by the vfsmount parent and the mntget above
	 */
	if (atomic_read(&mnt->mnt_count) == 2) {
		struct nameidata old_nd;

		/* delete from the namespace */
		touch_namespace(mnt->mnt_namespace);
		list_del_init(&mnt->mnt_list);
		mnt->mnt_namespace = NULL;
		detach_mnt(mnt, &old_nd);
		spin_unlock(&vfsmount_lock);
		path_release(&old_nd);
	} else {
		/*
		 * Someone brought it back to life whilst we didn't have any
		 * locks held so return it to the expiration list
		 */
		list_add_tail(&mnt->mnt_expire, mounts);
		spin_unlock(&vfsmount_lock);
	}
}

/*
 * process a list of expirable mountpoints with the intent of discarding any
 * mountpoints that aren't in use and haven't been touched since last we came
 * here
 */
void mark_mounts_for_expiry(struct list_head *mounts)
{
	struct namespace *namespace;
	struct vfsmount *mnt, *next;
	LIST_HEAD(graveyard);

	if (list_empty(mounts))
		return;

	spin_lock(&vfsmount_lock);

	/* extract from the expiration list every vfsmount that matches the
	 * following criteria:
	 * - only referenced by its parent vfsmount
	 * - still marked for expiry (marked on the last call here; marks are
	 *   cleared by mntput())
	 */
	list_for_each_entry_safe(mnt, next, mounts, mnt_expire) {
		if (!xchg(&mnt->mnt_expiry_mark, 1) ||
		    atomic_read(&mnt->mnt_count) != 1)
			continue;

		mntget(mnt);
		list_move(&mnt->mnt_expire, &graveyard);
	}

	/*
	 * go through the vfsmounts we've just consigned to the graveyard to
	 * - check that they're still dead
	 * - delete the vfsmount from the appropriate namespace under lock
	 * - dispose of the corpse
	 */
	while (!list_empty(&graveyard)) {
		mnt = list_entry(graveyard.next, struct vfsmount, mnt_expire);
		list_del_init(&mnt->mnt_expire);

		/* don't do anything if the namespace is dead - all the
		 * vfsmounts from it are going away anyway */
		namespace = mnt->mnt_namespace;
		if (!namespace || !namespace->root)
			continue;
		get_namespace(namespace);

		spin_unlock(&vfsmount_lock);
		down_write(&namespace->sem);
		expire_mount(mnt, mounts);
		up_write(&namespace->sem);

		mntput(mnt);
		put_namespace(namespace);

		spin_lock(&vfsmount_lock);
	}

	spin_unlock(&vfsmount_lock);
}

EXPORT_SYMBOL_GPL(mark_mounts_for_expiry);

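/*
 * Illustrative sketch (not part of the original file): because a mount
 * must survive one full pass with its expiry mark set before it is
 * reaped, callers typically re-run mark_mounts_for_expiry() from a
 * periodic work item.  All example_* names and the 30 second period are
 * hypothetical.
 */
#if 0
static LIST_HEAD(example_expirable_mounts);

static void example_expiry_worker(void *data);
static DECLARE_WORK(example_expiry_work, example_expiry_worker, NULL);

static void example_expiry_worker(void *data)
{
	mark_mounts_for_expiry(&example_expirable_mounts);
	/* re-arm: anything that survived gets a second look later */
	schedule_delayed_work(&example_expiry_work, 30 * HZ);
}
#endif
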
/*
 * Some copy_from_user() implementations do not return the exact number of
 * bytes remaining to copy on a fault. But copy_mount_options() requires that.
 * Note that this function differs from copy_from_user() in that it will oops
 * on bad values of `to', rather than returning a short copy.
 */
static long exact_copy_from_user(void *to, const void __user * from,
				 unsigned long n)
{
	char *t = to;
	const char __user *f = from;
	char c;

	if (!access_ok(VERIFY_READ, from, n))
		return n;

	while (n) {
		if (__get_user(c, f)) {
			memset(t, 0, n);
			break;
		}
		*t++ = c;
		f++;
		n--;
	}
	return n;
}

int copy_mount_options(const void __user * data, unsigned long *where)
{
	int i;
	unsigned long page;
	unsigned long size;

	*where = 0;
	if (!data)
		return 0;

	if (!(page = __get_free_page(GFP_KERNEL)))
		return -ENOMEM;

	/* We only care that *some* data at the address the user
	 * gave us is valid. Just in case, we'll zero
	 * the remainder of the page.
	 */
	/* copy_from_user cannot cross TASK_SIZE ! */
	size = TASK_SIZE - (unsigned long)data;
	if (size > PAGE_SIZE)
		size = PAGE_SIZE;

	i = size - exact_copy_from_user((void *)page, data, size);
	if (!i) {
		free_page(page);
		return -EFAULT;
	}
	if (i != PAGE_SIZE)
		memset((char *)page + i, 0, PAGE_SIZE - i);
	*where = page;
	return 0;
}

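/*
 * Illustrative note (not in the original): the page handed to the
 * filesystem is a verbatim snapshot of user memory.  Only the part past
 * a faulting address (or past TASK_SIZE) is zero-filled, and do_mount()
 * later forces a NUL into the last byte, so option parsers get a
 * terminated buffer of at most PAGE_SIZE - 1 usable bytes.
 */
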
/*
 * Flags is a 32-bit value that allows up to 31 non-fs dependent flags to
 * be given to the mount() call (ie: read-only, no-dev, no-suid etc).
 *
 * data is a (void *) that can point to any structure up to
 * PAGE_SIZE-1 bytes, which can contain arbitrary fs-dependent
 * information (or be NULL).
 *
 * Pre-0.97 versions of mount() didn't have a flags word.
 * When the flags word was introduced its top half was required
 * to have the magic value 0xC0ED, and this remained so until 2.4.0-test9.
 * Therefore, if this magic number is present, it carries no information
 * and must be discarded.
 */
long do_mount(char *dev_name, char *dir_name, char *type_page,
	      unsigned long flags, void *data_page)
{
	struct nameidata nd;
	int retval = 0;
	int mnt_flags = 0;

	/* Discard magic */
	if ((flags & MS_MGC_MSK) == MS_MGC_VAL)
		flags &= ~MS_MGC_MSK;

	/* Basic sanity checks */

	if (!dir_name || !*dir_name || !memchr(dir_name, 0, PAGE_SIZE))
		return -EINVAL;
	if (dev_name && !memchr(dev_name, 0, PAGE_SIZE))
		return -EINVAL;

	if (data_page)
		((char *)data_page)[PAGE_SIZE - 1] = 0;

	/* Separate the per-mountpoint flags */
	if (flags & MS_NOSUID)
		mnt_flags |= MNT_NOSUID;
	if (flags & MS_NODEV)
		mnt_flags |= MNT_NODEV;
	if (flags & MS_NOEXEC)
		mnt_flags |= MNT_NOEXEC;
	flags &= ~(MS_NOSUID | MS_NOEXEC | MS_NODEV | MS_ACTIVE);

	/* ... and get the mountpoint */
	retval = path_lookup(dir_name, LOOKUP_FOLLOW, &nd);
	if (retval)
		return retval;

	retval = security_sb_mount(dev_name, &nd, type_page, flags, data_page);
	if (retval)
		goto dput_out;

	if (flags & MS_REMOUNT)
		retval = do_remount(&nd, flags & ~MS_REMOUNT, mnt_flags,
				    data_page);
	else if (flags & MS_BIND)
		retval = do_loopback(&nd, dev_name, flags & MS_REC);
	else if (flags & MS_MOVE)
		retval = do_move_mount(&nd, dev_name);
	else
		retval = do_new_mount(&nd, type_page, flags, mnt_flags,
				      dev_name, data_page);
dput_out:
	path_release(&nd);
	return retval;
}

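/*
 * Illustrative sketch (not in the original): how the four branches above
 * are reached from userspace via mount(2).  Paths and fs types are
 * examples only.
 *
 *	mount("/dev/hda2", "/mnt", "ext3", MS_NOSUID, "");        new mount
 *	mount("/mnt", "/mnt2", NULL, MS_BIND, NULL);              do_loopback
 *	mount("/mnt2", "/mnt3", NULL, MS_MOVE, NULL);             do_move_mount
 *	mount(NULL, "/mnt", NULL, MS_REMOUNT | MS_RDONLY, NULL);  do_remount
 */
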
int copy_namespace(int flags, struct task_struct *tsk)
{
	struct namespace *namespace = tsk->namespace;
	struct namespace *new_ns;
	struct vfsmount *rootmnt = NULL, *pwdmnt = NULL, *altrootmnt = NULL;
	struct fs_struct *fs = tsk->fs;
	struct vfsmount *p, *q;

	if (!namespace)
		return 0;

	get_namespace(namespace);

	if (!(flags & CLONE_NEWNS))
		return 0;

	if (!capable(CAP_SYS_ADMIN)) {
		put_namespace(namespace);
		return -EPERM;
	}

	new_ns = kmalloc(sizeof(struct namespace), GFP_KERNEL);
	if (!new_ns)
		goto out;

	atomic_set(&new_ns->count, 1);
	init_rwsem(&new_ns->sem);
	INIT_LIST_HEAD(&new_ns->list);
	init_waitqueue_head(&new_ns->poll);
	new_ns->event = 0;

	down_write(&tsk->namespace->sem);
	/* First pass: copy the tree topology */
	new_ns->root = copy_tree(namespace->root, namespace->root->mnt_root);
	if (!new_ns->root) {
		up_write(&tsk->namespace->sem);
		kfree(new_ns);
		goto out;
	}
	spin_lock(&vfsmount_lock);
	list_add_tail(&new_ns->list, &new_ns->root->mnt_list);
	spin_unlock(&vfsmount_lock);

	/*
	 * Second pass: switch the tsk->fs->* elements and mark new vfsmounts
	 * as belonging to new namespace.  We have already acquired a private
	 * fs_struct, so tsk->fs->lock is not needed.
	 */
	p = namespace->root;
	q = new_ns->root;
	while (p) {
		q->mnt_namespace = new_ns;
		if (fs) {
			if (p == fs->rootmnt) {
				rootmnt = p;
				fs->rootmnt = mntget(q);
			}
			if (p == fs->pwdmnt) {
				pwdmnt = p;
				fs->pwdmnt = mntget(q);
			}
			if (p == fs->altrootmnt) {
				altrootmnt = p;
				fs->altrootmnt = mntget(q);
			}
		}
		p = next_mnt(p, namespace->root);
		q = next_mnt(q, new_ns->root);
	}
	up_write(&tsk->namespace->sem);

	tsk->namespace = new_ns;

	if (rootmnt)
		mntput(rootmnt);
	if (pwdmnt)
		mntput(pwdmnt);
	if (altrootmnt)
		mntput(altrootmnt);

	put_namespace(namespace);
	return 0;

out:
	put_namespace(namespace);
	return -ENOMEM;
}

asmlinkage long sys_mount(char __user * dev_name, char __user * dir_name,
			  char __user * type, unsigned long flags,
			  void __user * data)
{
	int retval;
	unsigned long data_page;
	unsigned long type_page;
	unsigned long dev_page;
	char *dir_page;

	retval = copy_mount_options(type, &type_page);
	if (retval < 0)
		return retval;

	dir_page = getname(dir_name);
	retval = PTR_ERR(dir_page);
	if (IS_ERR(dir_page))
		goto out1;

	retval = copy_mount_options(dev_name, &dev_page);
	if (retval < 0)
		goto out2;

	retval = copy_mount_options(data, &data_page);
	if (retval < 0)
		goto out3;

	lock_kernel();
	retval = do_mount((char *)dev_page, dir_page, (char *)type_page,
			  flags, (void *)data_page);
	unlock_kernel();
	free_page(data_page);

out3:
	free_page(dev_page);
out2:
	putname(dir_page);
out1:
	free_page(type_page);
	return retval;
}

/*
 * Replace the fs->{rootmnt,root} with {mnt,dentry}. Put the old values.
 * It can block. Requires the big lock held.
 */
void set_fs_root(struct fs_struct *fs, struct vfsmount *mnt,
		 struct dentry *dentry)
{
	struct dentry *old_root;
	struct vfsmount *old_rootmnt;
	write_lock(&fs->lock);
	old_root = fs->root;
	old_rootmnt = fs->rootmnt;
	fs->rootmnt = mntget(mnt);
	fs->root = dget(dentry);
	write_unlock(&fs->lock);
	if (old_root) {
		dput(old_root);
		mntput(old_rootmnt);
	}
}

/*
 * Replace the fs->{pwdmnt,pwd} with {mnt,dentry}. Put the old values.
 * It can block. Requires the big lock held.
 */
void set_fs_pwd(struct fs_struct *fs, struct vfsmount *mnt,
		struct dentry *dentry)
{
	struct dentry *old_pwd;
	struct vfsmount *old_pwdmnt;

	write_lock(&fs->lock);
	old_pwd = fs->pwd;
	old_pwdmnt = fs->pwdmnt;
	fs->pwdmnt = mntget(mnt);
	fs->pwd = dget(dentry);
	write_unlock(&fs->lock);

	if (old_pwd) {
		dput(old_pwd);
		mntput(old_pwdmnt);
	}
}

static void chroot_fs_refs(struct nameidata *old_nd, struct nameidata *new_nd)
{
	struct task_struct *g, *p;
	struct fs_struct *fs;

	read_lock(&tasklist_lock);
	do_each_thread(g, p) {
		task_lock(p);
		fs = p->fs;
		if (fs) {
			atomic_inc(&fs->count);
			task_unlock(p);
			if (fs->root == old_nd->dentry
			    && fs->rootmnt == old_nd->mnt)
				set_fs_root(fs, new_nd->mnt, new_nd->dentry);
			if (fs->pwd == old_nd->dentry
			    && fs->pwdmnt == old_nd->mnt)
				set_fs_pwd(fs, new_nd->mnt, new_nd->dentry);
			put_fs_struct(fs);
		} else
			task_unlock(p);
	} while_each_thread(g, p);
	read_unlock(&tasklist_lock);
}

/*
 * pivot_root Semantics:
 * Moves the root file system of the current process to the directory put_old,
 * makes new_root as the new root file system of the current process, and sets
 * root/cwd of all processes which had them on the current root to new_root.
 *
 * Restrictions:
 * The new_root and put_old must be directories, and must not be on the
 * same file system as the current process root. The put_old must be
 * underneath new_root, i.e. adding a non-zero number of /.. to the string
 * pointed to by put_old must yield the same directory as new_root. No other
 * file system may be mounted on put_old. After all, new_root is a mountpoint.
 *
 * Notes:
 *  - we don't move root/cwd if they are not at the root (reason: if something
 *    cared enough to change them, it's probably wrong to force them elsewhere)
 *  - it's okay to pick a root that isn't the root of a file system, e.g.
 *    /nfs/my_root where /nfs is the mount point. It must be a mountpoint,
 *    though, so you may need to say mount --bind /nfs/my_root /nfs/my_root
 *    first.
 */
asmlinkage long sys_pivot_root(const char __user * new_root,
			       const char __user * put_old)
{
	struct vfsmount *tmp;
	struct nameidata new_nd, old_nd, parent_nd, root_parent, user_nd;
	int error;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	lock_kernel();

	error = __user_walk(new_root, LOOKUP_FOLLOW | LOOKUP_DIRECTORY,
			    &new_nd);
	if (error)
		goto out0;
	error = -EINVAL;
	if (!check_mnt(new_nd.mnt))
		goto out1;

	error = __user_walk(put_old, LOOKUP_FOLLOW | LOOKUP_DIRECTORY, &old_nd);
	if (error)
		goto out1;

	error = security_sb_pivotroot(&old_nd, &new_nd);
	if (error) {
		path_release(&old_nd);
		goto out1;
	}

	read_lock(&current->fs->lock);
	user_nd.mnt = mntget(current->fs->rootmnt);
	user_nd.dentry = dget(current->fs->root);
	read_unlock(&current->fs->lock);
	down_write(&current->namespace->sem);
	down(&old_nd.dentry->d_inode->i_sem);
	error = -EINVAL;
	if (!check_mnt(user_nd.mnt))
		goto out2;
	error = -ENOENT;
	if (IS_DEADDIR(new_nd.dentry->d_inode))
		goto out2;
	if (d_unhashed(new_nd.dentry) && !IS_ROOT(new_nd.dentry))
		goto out2;
	if (d_unhashed(old_nd.dentry) && !IS_ROOT(old_nd.dentry))
		goto out2;
	error = -EBUSY;
	if (new_nd.mnt == user_nd.mnt || old_nd.mnt == user_nd.mnt)
		goto out2; /* loop, on the same file system */
	error = -EINVAL;
	if (user_nd.mnt->mnt_root != user_nd.dentry)
		goto out2; /* not a mountpoint */
	if (user_nd.mnt->mnt_parent == user_nd.mnt)
		goto out2; /* not attached */
	if (new_nd.mnt->mnt_root != new_nd.dentry)
		goto out2; /* not a mountpoint */
	if (new_nd.mnt->mnt_parent == new_nd.mnt)
		goto out2; /* not attached */
	tmp = old_nd.mnt; /* make sure we can reach put_old from new_root */
	spin_lock(&vfsmount_lock);
	if (tmp != new_nd.mnt) {
		for (;;) {
			if (tmp->mnt_parent == tmp)
				goto out3; /* already mounted on put_old */
			if (tmp->mnt_parent == new_nd.mnt)
				break;
			tmp = tmp->mnt_parent;
		}
		if (!is_subdir(tmp->mnt_mountpoint, new_nd.dentry))
			goto out3;
	} else if (!is_subdir(old_nd.dentry, new_nd.dentry))
		goto out3;
	detach_mnt(new_nd.mnt, &parent_nd);
	detach_mnt(user_nd.mnt, &root_parent);
	attach_mnt(user_nd.mnt, &old_nd);     /* mount old root on put_old */
	attach_mnt(new_nd.mnt, &root_parent); /* mount new_root on / */
	touch_namespace(current->namespace);
	spin_unlock(&vfsmount_lock);
	chroot_fs_refs(&user_nd, &new_nd);
	security_sb_post_pivotroot(&user_nd, &new_nd);
	error = 0;
	path_release(&root_parent);
	path_release(&parent_nd);
out2:
	up(&old_nd.dentry->d_inode->i_sem);
	up_write(&current->namespace->sem);
	path_release(&user_nd);
	path_release(&old_nd);
out1:
	path_release(&new_nd);
out0:
	unlock_kernel();
	return error;
out3:
	spin_unlock(&vfsmount_lock);
	goto out2;
}

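/*
 * Illustrative userspace sketch (not part of this file): the classic
 * initrd-to-real-root switch, respecting the restrictions documented
 * above.  Paths are examples only; umount2() is the flags-taking
 * userspace entry point for umount.
 *
 *	chdir("/new-root");
 *	pivot_root(".", "old-root");
 *	chroot(".");
 *	chdir("/");
 *	umount2("/old-root", MNT_DETACH);
 */
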
static void __init init_mount_tree(void)
{
	struct vfsmount *mnt;
	struct namespace *namespace;
	struct task_struct *g, *p;

	mnt = do_kern_mount("rootfs", 0, "rootfs", NULL);
	if (IS_ERR(mnt))
		panic("Can't create rootfs");
	namespace = kmalloc(sizeof(*namespace), GFP_KERNEL);
	if (!namespace)
		panic("Can't allocate initial namespace");
	atomic_set(&namespace->count, 1);
	INIT_LIST_HEAD(&namespace->list);
	init_rwsem(&namespace->sem);
	init_waitqueue_head(&namespace->poll);
	namespace->event = 0;
	list_add(&mnt->mnt_list, &namespace->list);
	namespace->root = mnt;
	mnt->mnt_namespace = namespace;

	init_task.namespace = namespace;
	read_lock(&tasklist_lock);
	do_each_thread(g, p) {
		get_namespace(namespace);
		p->namespace = namespace;
	} while_each_thread(g, p);
	read_unlock(&tasklist_lock);

	set_fs_pwd(current->fs, namespace->root, namespace->root->mnt_root);
	set_fs_root(current->fs, namespace->root, namespace->root->mnt_root);
}

void __init mnt_init(unsigned long mempages)
{
	struct list_head *d;
	unsigned int nr_hash;
	int i;

	mnt_cache = kmem_cache_create("mnt_cache", sizeof(struct vfsmount),
			0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL, NULL);

	mount_hashtable = (struct list_head *)__get_free_page(GFP_ATOMIC);

	if (!mount_hashtable)
		panic("Failed to allocate mount hash table\n");

	/*
	 * Find the power-of-two list-heads that can fit into the allocation..
	 * We don't guarantee that "sizeof(struct list_head)" is necessarily
	 * a power-of-two.
	 */
	nr_hash = PAGE_SIZE / sizeof(struct list_head);
	hash_bits = 0;
	do {
		hash_bits++;
	} while ((nr_hash >> hash_bits) != 0);
	hash_bits--;

	/*
	 * Re-calculate the actual number of entries and the mask
	 * from the number of bits we can fit.
	 */
	nr_hash = 1UL << hash_bits;
	hash_mask = nr_hash - 1;

	printk("Mount-cache hash table entries: %d\n", nr_hash);

	/* And initialize the newly allocated array */
	d = mount_hashtable;
	i = nr_hash;
	do {
		INIT_LIST_HEAD(d);
		d++;
		i--;
	} while (i);

	sysfs_init();
	init_rootfs();
	init_mount_tree();
}

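/*
 * Worked example of the sizing above (illustrative, not in the
 * original): with PAGE_SIZE == 4096 and sizeof(struct list_head) == 8,
 * nr_hash starts at 512; the loop settles on hash_bits == 9, so nr_hash
 * is recomputed as 1 << 9 == 512 and hash_mask as 511.  If list_head
 * were 12 bytes (not a power of two), nr_hash would start at 341 and
 * round down to 256 entries with hash_bits == 8.
 */
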
void __put_namespace(struct namespace *namespace)
{
	struct vfsmount *root = namespace->root;
	namespace->root = NULL;
	spin_unlock(&vfsmount_lock);
	down_write(&namespace->sem);
	spin_lock(&vfsmount_lock);
	umount_tree(root);
	spin_unlock(&vfsmount_lock);
	up_write(&namespace->sem);
	kfree(namespace);
}