/* fs/fs_struct.c */
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/path.h>
#include <linux/slab.h>
#include <linux/fs_struct.h>
#include "internal.h"

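/*
 * fs_struct holds long-lived references to its root and pwd paths:
 * these helpers take/drop the path reference and flag the mount as
 * having a long-term user via mnt_make_longterm()/mnt_make_shortterm().
 */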
static inline void path_get_longterm(struct path *path)
{
        path_get(path);
        mnt_make_longterm(path->mnt);
}

static inline void path_put_longterm(struct path *path)
{
        mnt_make_shortterm(path->mnt);
        path_put(path);
}

/*
 * Replace fs->root with *path and put the old root.
 * It can block.
 */
void set_fs_root(struct fs_struct *fs, struct path *path)
{
        struct path old_root;

        spin_lock(&fs->lock);
        write_seqcount_begin(&fs->seq);
        old_root = fs->root;
        fs->root = *path;
        path_get_longterm(path);
        write_seqcount_end(&fs->seq);
        spin_unlock(&fs->lock);
        if (old_root.dentry)
                path_put_longterm(&old_root);
}

/*
 * Replace fs->pwd with *path and put the old pwd.
 * It can block.
 */
void set_fs_pwd(struct fs_struct *fs, struct path *path)
{
        struct path old_pwd;

        spin_lock(&fs->lock);
        write_seqcount_begin(&fs->seq);
        old_pwd = fs->pwd;
        fs->pwd = *path;
        path_get_longterm(path);
        write_seqcount_end(&fs->seq);
        spin_unlock(&fs->lock);

        if (old_pwd.dentry)
                path_put_longterm(&old_pwd);
}

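/*
 * Point every task whose root or pwd still references old_root at
 * new_root instead, then drop the references that were displaced.
 */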
void chroot_fs_refs(struct path *old_root, struct path *new_root)
{
        struct task_struct *g, *p;
        struct fs_struct *fs;
        int count = 0;

        read_lock(&tasklist_lock);
        do_each_thread(g, p) {
                task_lock(p);
                fs = p->fs;
                if (fs) {
                        spin_lock(&fs->lock);
                        write_seqcount_begin(&fs->seq);
                        if (fs->root.dentry == old_root->dentry
                            && fs->root.mnt == old_root->mnt) {
                                path_get_longterm(new_root);
                                fs->root = *new_root;
                                count++;
                        }
                        if (fs->pwd.dentry == old_root->dentry
                            && fs->pwd.mnt == old_root->mnt) {
                                path_get_longterm(new_root);
                                fs->pwd = *new_root;
                                count++;
                        }
                        write_seqcount_end(&fs->seq);
                        spin_unlock(&fs->lock);
                }
                task_unlock(p);
        } while_each_thread(g, p);
        read_unlock(&tasklist_lock);
        while (count--)
                path_put_longterm(old_root);
}

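/* Release the root and pwd references and free the fs_struct itself. */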
void free_fs_struct(struct fs_struct *fs)
{
        path_put_longterm(&fs->root);
        path_put_longterm(&fs->pwd);
        kmem_cache_free(fs_cachep, fs);
}

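/*
 * Detach @tsk from its fs_struct; free the structure once the last
 * user has dropped it.
 */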
void exit_fs(struct task_struct *tsk)
{
        struct fs_struct *fs = tsk->fs;

        if (fs) {
                int kill;
                task_lock(tsk);
                spin_lock(&fs->lock);
                write_seqcount_begin(&fs->seq);
                tsk->fs = NULL;
                kill = !--fs->users;
                write_seqcount_end(&fs->seq);
                spin_unlock(&fs->lock);
                task_unlock(tsk);
                if (kill)
                        free_fs_struct(fs);
        }
}

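/*
 * Allocate a new fs_struct that starts out with the same umask, root
 * and pwd as @old, taking its own long-term path references.
 */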
struct fs_struct *copy_fs_struct(struct fs_struct *old)
{
        struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
        /* No need to lock the new fs: nobody else can see it yet. */
        if (fs) {
                fs->users = 1;
                fs->in_exec = 0;
                spin_lock_init(&fs->lock);
                seqcount_init(&fs->seq);
                fs->umask = old->umask;

                spin_lock(&old->lock);
                fs->root = old->root;
                path_get_longterm(&fs->root);
                fs->pwd = old->pwd;
                path_get_longterm(&fs->pwd);
                spin_unlock(&old->lock);
        }
        return fs;
}

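/*
 * Give the calling task a private copy of its fs_struct, dropping its
 * reference on the old one (and freeing it if that was the last user).
 */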
int unshare_fs_struct(void)
{
        struct fs_struct *fs = current->fs;
        struct fs_struct *new_fs = copy_fs_struct(fs);
        int kill;

        if (!new_fs)
                return -ENOMEM;

        task_lock(current);
        spin_lock(&fs->lock);
        kill = !--fs->users;
        current->fs = new_fs;
        spin_unlock(&fs->lock);
        task_unlock(current);

        if (kill)
                free_fs_struct(fs);

        return 0;
}
EXPORT_SYMBOL_GPL(unshare_fs_struct);

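/* Return the file mode creation mask of the current task. */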
int current_umask(void)
{
        return current->fs->umask;
}
EXPORT_SYMBOL(current_umask);

/* to be mentioned only in INIT_TASK */
struct fs_struct init_fs = {
        .users          = 1,
        .lock           = __SPIN_LOCK_UNLOCKED(init_fs.lock),
        .seq            = SEQCNT_ZERO,
        .umask          = 0022,
};

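/*
 * Switch the current task over to init_fs, dropping its reference on
 * the old fs_struct (and freeing it if that was the last user).
 */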
void daemonize_fs_struct(void)
{
        struct fs_struct *fs = current->fs;

        if (fs) {
                int kill;

                task_lock(current);

                spin_lock(&init_fs.lock);
                init_fs.users++;
                spin_unlock(&init_fs.lock);

                spin_lock(&fs->lock);
                current->fs = &init_fs;
                kill = !--fs->users;
                spin_unlock(&fs->lock);

                task_unlock(current);
                if (kill)
                        free_fs_struct(fs);
        }
}