/*
 * Copyright (C) 1998-1999, Stephen Tweedie and Bill Hawes
 *
 * Manage the dynamic fd arrays in the process files_struct.
 */

#include <linux/syscalls.h>
#include <linux/export.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/time.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/bitops.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/rcupdate.h>
#include <linux/workqueue.h>

int sysctl_nr_open __read_mostly = 1024*1024;
int sysctl_nr_open_min = BITS_PER_LONG;
/* our min() is unusable in constant expressions ;-/ */
#define __const_min(x, y) ((x) < (y) ? (x) : (y))
int sysctl_nr_open_max = __const_min(INT_MAX, ~(size_t)0/sizeof(void *)) &
			 -BITS_PER_LONG;
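/*
 * In other words: the hard ceiling is "as many struct file pointers as fit
 * in a size_t worth of bytes, but never more than INT_MAX", rounded down to
 * a multiple of BITS_PER_LONG so the fd bitmaps always end on a word
 * boundary.
 */
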
static void *alloc_fdmem(size_t size)
{
	/*
	 * Very large allocations can stress page reclaim, so fall back to
	 * vmalloc() if the allocation size will be considered "large" by the VM.
	 */
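	/*
	 * For a rough sense of scale (assuming 4K pages and the usual
	 * PAGE_ALLOC_COSTLY_ORDER of 3): the cutoff below is 32KB, i.e. an
	 * fd array of roughly 4096 slots on a 64-bit kernel; anything bigger
	 * goes straight to vmalloc().
	 */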
	if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) {
		void *data = kmalloc(size, GFP_KERNEL|__GFP_NOWARN|__GFP_NORETRY);
		if (data != NULL)
			return data;
	}
	return vmalloc(size);
}

static void free_fdmem(void *ptr)
{
	is_vmalloc_addr(ptr) ? vfree(ptr) : kfree(ptr);
}

static void __free_fdtable(struct fdtable *fdt)
{
	free_fdmem(fdt->fd);
	free_fdmem(fdt->open_fds);
	kfree(fdt);
}

static void free_fdtable_rcu(struct rcu_head *rcu)
{
	__free_fdtable(container_of(rcu, struct fdtable, rcu));
}

/*
 * Expand the fdset in the files_struct.  Called with the files spinlock
 * held for write.
 */
static void copy_fdtable(struct fdtable *nfdt, struct fdtable *ofdt)
{
	unsigned int cpy, set;

	BUG_ON(nfdt->max_fds < ofdt->max_fds);

	cpy = ofdt->max_fds * sizeof(struct file *);
	set = (nfdt->max_fds - ofdt->max_fds) * sizeof(struct file *);
	memcpy(nfdt->fd, ofdt->fd, cpy);
	memset((char *)(nfdt->fd) + cpy, 0, set);

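	/*
	 * The two bitmaps are copied at byte granularity; this never
	 * truncates mid-word because every fdtable keeps max_fds a multiple
	 * of BITS_PER_LONG (see alloc_fdtable() below).
	 */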
	cpy = ofdt->max_fds / BITS_PER_BYTE;
	set = (nfdt->max_fds - ofdt->max_fds) / BITS_PER_BYTE;
	memcpy(nfdt->open_fds, ofdt->open_fds, cpy);
	memset((char *)(nfdt->open_fds) + cpy, 0, set);
	memcpy(nfdt->close_on_exec, ofdt->close_on_exec, cpy);
	memset((char *)(nfdt->close_on_exec) + cpy, 0, set);
}

static struct fdtable * alloc_fdtable(unsigned int nr)
{
	struct fdtable *fdt;
	void *data;

	/*
	 * Figure out how many fds we actually want to support in this fdtable.
	 * Allocation steps are keyed to the size of the fdarray, since it
	 * grows far faster than any of the other dynamic data. We try to fit
	 * the fdarray into comfortable page-tuned chunks: starting at 1024B
	 * and growing in powers of two from there on.
	 */
	nr /= (1024 / sizeof(struct file *));
	nr = roundup_pow_of_two(nr + 1);
	nr *= (1024 / sizeof(struct file *));
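	/*
	 * Worked example (64-bit, so 1024B holds 128 slots): asking for
	 * fd 300 gives nr = 300/128 = 2, which rounds up as 2+1 -> 4,
	 * i.e. a table of 4*128 = 512 descriptors backed by a 4KB fdarray.
	 */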
	/*
	 * Note that this can drive nr *below* what we had passed if sysctl_nr_open
	 * had been set lower between the check in expand_files() and here.  Deal
	 * with that in the caller; it's cheaper that way.
	 *
	 * We make sure that nr remains a multiple of BITS_PER_LONG - otherwise
	 * bitmaps handling below becomes unpleasant, to put it mildly...
	 */
	if (unlikely(nr > sysctl_nr_open))
		nr = ((sysctl_nr_open - 1) | (BITS_PER_LONG - 1)) + 1;

	fdt = kmalloc(sizeof(struct fdtable), GFP_KERNEL);
	if (!fdt)
		goto out;
	fdt->max_fds = nr;
	data = alloc_fdmem(nr * sizeof(struct file *));
	if (!data)
		goto out_fdt;
	fdt->fd = data;

	data = alloc_fdmem(max_t(size_t,
				 2 * nr / BITS_PER_BYTE, L1_CACHE_BYTES));
	if (!data)
		goto out_arr;
	fdt->open_fds = data;
	data += nr / BITS_PER_BYTE;
	fdt->close_on_exec = data;

	return fdt;

out_arr:
	free_fdmem(fdt->fd);
out_fdt:
	kfree(fdt);
out:
	return NULL;
}

/*
 * Expand the file descriptor table.
 * This function will allocate a new fdtable and both fd array and fdset, of
 * the given size.
 * Return <0 error code on error; 1 on successful completion.
 * The files->file_lock should be held on entry, and will be held on exit.
 */
static int expand_fdtable(struct files_struct *files, int nr)
	__releases(files->file_lock)
	__acquires(files->file_lock)
{
	struct fdtable *new_fdt, *cur_fdt;

	spin_unlock(&files->file_lock);
	new_fdt = alloc_fdtable(nr);
	spin_lock(&files->file_lock);
	if (!new_fdt)
		return -ENOMEM;
	/*
	 * extremely unlikely race - sysctl_nr_open decreased between the check in
	 * caller and alloc_fdtable().  Cheaper to catch it here...
	 */
	if (unlikely(new_fdt->max_fds <= nr)) {
		__free_fdtable(new_fdt);
		return -EMFILE;
	}
	/*
	 * Check again since another task may have expanded the fd table while
	 * we dropped the lock
	 */
	cur_fdt = files_fdtable(files);
	if (nr >= cur_fdt->max_fds) {
		/* Continue as planned */
		copy_fdtable(new_fdt, cur_fdt);
		rcu_assign_pointer(files->fdt, new_fdt);
		if (cur_fdt != &files->fdtab)
			call_rcu(&cur_fdt->rcu, free_fdtable_rcu);
	} else {
		/* Somebody else expanded, so undo our attempt */
		__free_fdtable(new_fdt);
	}
	return 1;
}

/*
 * Expand files.
 * This function will expand the file structures, if the requested size exceeds
 * the current capacity and there is room for expansion.
 * Return <0 error code on error; 0 when nothing done; 1 when files were
 * expanded and execution may have blocked.
 * The files->file_lock should be held on entry, and will be held on exit.
 */
static int expand_files(struct files_struct *files, int nr)
{
	struct fdtable *fdt;

	fdt = files_fdtable(files);

	/* Do we need to expand? */
	if (nr < fdt->max_fds)
		return 0;

	/* Can we expand? */
	if (nr >= sysctl_nr_open)
		return -EMFILE;

	/* All good, so we try */
	return expand_fdtable(files, nr);
}

static inline void __set_close_on_exec(int fd, struct fdtable *fdt)
{
	__set_bit(fd, fdt->close_on_exec);
}

static inline void __clear_close_on_exec(int fd, struct fdtable *fdt)
{
	__clear_bit(fd, fdt->close_on_exec);
}

static inline void __set_open_fd(int fd, struct fdtable *fdt)
{
	__set_bit(fd, fdt->open_fds);
}

static inline void __clear_open_fd(int fd, struct fdtable *fdt)
{
	__clear_bit(fd, fdt->open_fds);
}

static int count_open_files(struct fdtable *fdt)
{
	int size = fdt->max_fds;
	int i;

	/* Find the last open fd */
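	/*
	 * Note: the result is rounded up to a whole bitmap word, since we
	 * scan open_fds one long at a time; with only fd 0 open this still
	 * reports BITS_PER_LONG slots for dup_fd() to copy.
	 */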
	for (i = size / BITS_PER_LONG; i > 0; ) {
		if (fdt->open_fds[--i])
			break;
	}
	i = (i + 1) * BITS_PER_LONG;
	return i;
}

/*
 * Allocate a new files structure and copy contents from the
 * passed in files structure.
 * errorp will be valid only when the returned files_struct is NULL.
 */
struct files_struct *dup_fd(struct files_struct *oldf, int *errorp)
{
	struct files_struct *newf;
	struct file **old_fds, **new_fds;
	int open_files, size, i;
	struct fdtable *old_fdt, *new_fdt;

	*errorp = -ENOMEM;
	newf = kmem_cache_alloc(files_cachep, GFP_KERNEL);
	if (!newf)
		goto out;

	atomic_set(&newf->count, 1);

	spin_lock_init(&newf->file_lock);
	newf->next_fd = 0;
	new_fdt = &newf->fdtab;
	new_fdt->max_fds = NR_OPEN_DEFAULT;
	new_fdt->close_on_exec = newf->close_on_exec_init;
	new_fdt->open_fds = newf->open_fds_init;
	new_fdt->fd = &newf->fd_array[0];

	spin_lock(&oldf->file_lock);
	old_fdt = files_fdtable(oldf);
	open_files = count_open_files(old_fdt);

	/*
	 * Check whether we need to allocate a larger fd array and fd set.
	 */
	while (unlikely(open_files > new_fdt->max_fds)) {
		spin_unlock(&oldf->file_lock);

		if (new_fdt != &newf->fdtab)
			__free_fdtable(new_fdt);

		new_fdt = alloc_fdtable(open_files - 1);
		if (!new_fdt) {
			*errorp = -ENOMEM;
			goto out_release;
		}

		/* beyond sysctl_nr_open; nothing to do */
		if (unlikely(new_fdt->max_fds < open_files)) {
			__free_fdtable(new_fdt);
			*errorp = -EMFILE;
			goto out_release;
		}

		/*
		 * Reacquire the oldf lock and a pointer to its fd table; it
		 * may have grown a bigger fd table in the meantime, so we
		 * need the latest pointer.
		 */
		spin_lock(&oldf->file_lock);
		old_fdt = files_fdtable(oldf);
		open_files = count_open_files(old_fdt);
	}

	old_fds = old_fdt->fd;
	new_fds = new_fdt->fd;

	memcpy(new_fdt->open_fds, old_fdt->open_fds, open_files / 8);
	memcpy(new_fdt->close_on_exec, old_fdt->close_on_exec, open_files / 8);

	for (i = open_files; i != 0; i--) {
		struct file *f = *old_fds++;
		if (f) {
			get_file(f);
		} else {
			/*
			 * The fd may be claimed in the fd bitmap but not yet
			 * instantiated in the files array if a sibling thread
			 * is partway through open().  So make sure that this
			 * fd is available to the new process.
			 */
			__clear_open_fd(open_files - i, new_fdt);
		}
		rcu_assign_pointer(*new_fds++, f);
	}
	spin_unlock(&oldf->file_lock);

	/* compute the remainder to be cleared */
	size = (new_fdt->max_fds - open_files) * sizeof(struct file *);

	/* This is long-word aligned, thus could use an optimized version */
	memset(new_fds, 0, size);

	if (new_fdt->max_fds > open_files) {
		int left = (new_fdt->max_fds - open_files) / 8;
		int start = open_files / BITS_PER_LONG;

		memset(&new_fdt->open_fds[start], 0, left);
		memset(&new_fdt->close_on_exec[start], 0, left);
	}

	rcu_assign_pointer(newf->fdt, new_fdt);

	return newf;

out_release:
	kmem_cache_free(files_cachep, newf);
out:
	return NULL;
}

static struct fdtable *close_files(struct files_struct * files)
{
	/*
	 * It is safe to dereference the fd table without RCU or
	 * ->file_lock because this is the last reference to the
	 * files structure.
	 */
	struct fdtable *fdt = rcu_dereference_raw(files->fdt);
	int i, j = 0;

	for (;;) {
		unsigned long set;
		i = j * BITS_PER_LONG;
		if (i >= fdt->max_fds)
			break;
		set = fdt->open_fds[j++];
		while (set) {
			if (set & 1) {
				struct file * file = xchg(&fdt->fd[i], NULL);
				if (file) {
					filp_close(file, files);
					cond_resched();
				}
			}
			i++;
			set >>= 1;
		}
	}

	return fdt;
}

struct files_struct *get_files_struct(struct task_struct *task)
{
	struct files_struct *files;

	task_lock(task);
	files = task->files;
	if (files)
		atomic_inc(&files->count);
	task_unlock(task);

	return files;
}

void put_files_struct(struct files_struct *files)
{
	if (atomic_dec_and_test(&files->count)) {
		struct fdtable *fdt = close_files(files);

		/* free the arrays if they are not embedded */
		if (fdt != &files->fdtab)
			__free_fdtable(fdt);
		kmem_cache_free(files_cachep, files);
	}
}

void reset_files_struct(struct files_struct *files)
{
	struct task_struct *tsk = current;
	struct files_struct *old;

	old = tsk->files;
	task_lock(tsk);
	tsk->files = files;
	task_unlock(tsk);
	put_files_struct(old);
}

void exit_files(struct task_struct *tsk)
{
	struct files_struct * files = tsk->files;

	if (files) {
		task_lock(tsk);
		tsk->files = NULL;
		task_unlock(tsk);
		put_files_struct(files);
	}
}

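/*
 * Statically allocated files_struct used by the initial task; it carries
 * only the embedded NR_OPEN_DEFAULT-slot table, with everything else
 * starting out empty.
 */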
struct files_struct init_files = {
	.count		= ATOMIC_INIT(1),
	.fdt		= &init_files.fdtab,
	.fdtab		= {
		.max_fds	= NR_OPEN_DEFAULT,
		.fd		= &init_files.fd_array[0],
		.close_on_exec	= init_files.close_on_exec_init,
		.open_fds	= init_files.open_fds_init,
	},
	.file_lock	= __SPIN_LOCK_UNLOCKED(init_files.file_lock),
};

/*
 * Allocate a file descriptor, mark it busy.
 */
int __alloc_fd(struct files_struct *files,
	       unsigned start, unsigned end, unsigned flags)
{
	unsigned int fd;
	int error;
	struct fdtable *fdt;

	spin_lock(&files->file_lock);
repeat:
	fdt = files_fdtable(files);
	fd = start;
	if (fd < files->next_fd)
		fd = files->next_fd;

	if (fd < fdt->max_fds)
		fd = find_next_zero_bit(fdt->open_fds, fdt->max_fds, fd);

	/*
	 * N.B. For clone tasks sharing a files structure, this test
	 * will limit the total number of files that can be opened.
	 */
	error = -EMFILE;
	if (fd >= end)
		goto out;

	error = expand_files(files, fd);
	if (error < 0)
		goto out;

	/*
	 * If we needed to expand the fd array we
	 * might have blocked - try again.
	 */
	if (error)
		goto repeat;

	if (start <= files->next_fd)
		files->next_fd = fd + 1;

	__set_open_fd(fd, fdt);
	if (flags & O_CLOEXEC)
		__set_close_on_exec(fd, fdt);
	else
		__clear_close_on_exec(fd, fdt);
	error = fd;

	/* Sanity check */
	if (rcu_access_pointer(fdt->fd[fd]) != NULL) {
		printk(KERN_WARNING "alloc_fd: slot %d not NULL!\n", fd);
		rcu_assign_pointer(fdt->fd[fd], NULL);
	}

out:
	spin_unlock(&files->file_lock);
	return error;
}

static int alloc_fd(unsigned start, unsigned flags)
{
	return __alloc_fd(current->files, start, rlimit(RLIMIT_NOFILE), flags);
}

int get_unused_fd_flags(unsigned flags)
{
	return __alloc_fd(current->files, 0, rlimit(RLIMIT_NOFILE), flags);
}
EXPORT_SYMBOL(get_unused_fd_flags);

static void __put_unused_fd(struct files_struct *files, unsigned int fd)
{
	struct fdtable *fdt = files_fdtable(files);
	__clear_open_fd(fd, fdt);
	if (fd < files->next_fd)
		files->next_fd = fd;
}

void put_unused_fd(unsigned int fd)
{
	struct files_struct *files = current->files;
	spin_lock(&files->file_lock);
	__put_unused_fd(files, fd);
	spin_unlock(&files->file_lock);
}

EXPORT_SYMBOL(put_unused_fd);

/*
 * Install a file pointer in the fd array.
 *
 * The VFS is full of places where we drop the files lock between
 * setting the open_fds bitmap and installing the file in the file
 * array.  At any such point, we are vulnerable to a dup2() race
 * installing a file in the array before us.  We need to detect this and
 * fput() the struct file we are about to overwrite in this case.
 *
 * It should never happen - if we allow dup2() do it, _really_ bad things
 * will follow.
 *
 * NOTE: __fd_install() variant is really, really low-level; don't
 * use it unless you are forced to by truly lousy API shoved down
 * your throat.  'files' *MUST* be either current->files or obtained
 * by get_files_struct(current) done by whoever had given it to you,
 * or really bad things will happen.  Normally you want to use
 * fd_install() instead.
 */
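
/*
 * Typical open()-style usage of the reservation/installation pair
 * (a sketch, not a verbatim caller):
 *
 *	fd = get_unused_fd_flags(flags);
 *	if (fd < 0)
 *		return fd;
 *	file = ...;			// set up the struct file
 *	if (IS_ERR(file)) {
 *		put_unused_fd(fd);	// undo the reservation
 *		return PTR_ERR(file);
 *	}
 *	fd_install(fd, file);		// publish; fd is now live
 *	return fd;
 */
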
void __fd_install(struct files_struct *files, unsigned int fd,
		struct file *file)
{
	struct fdtable *fdt;
	spin_lock(&files->file_lock);
	fdt = files_fdtable(files);
	BUG_ON(fdt->fd[fd] != NULL);
	rcu_assign_pointer(fdt->fd[fd], file);
	spin_unlock(&files->file_lock);
}

void fd_install(unsigned int fd, struct file *file)
{
	__fd_install(current->files, fd, file);
}

EXPORT_SYMBOL(fd_install);

/*
 * The same warnings as for __alloc_fd()/__fd_install() apply here...
 */
int __close_fd(struct files_struct *files, unsigned fd)
{
	struct file *file;
	struct fdtable *fdt;

	spin_lock(&files->file_lock);
	fdt = files_fdtable(files);
	if (fd >= fdt->max_fds)
		goto out_unlock;
	file = fdt->fd[fd];
	if (!file)
		goto out_unlock;
	rcu_assign_pointer(fdt->fd[fd], NULL);
	__clear_close_on_exec(fd, fdt);
	__put_unused_fd(files, fd);
	spin_unlock(&files->file_lock);
	return filp_close(file, files);

out_unlock:
	spin_unlock(&files->file_lock);
	return -EBADF;
}

void do_close_on_exec(struct files_struct *files)
{
	unsigned i;
	struct fdtable *fdt;

	/* exec unshares first */
	spin_lock(&files->file_lock);
	for (i = 0; ; i++) {
		unsigned long set;
		unsigned fd = i * BITS_PER_LONG;
		fdt = files_fdtable(files);
		if (fd >= fdt->max_fds)
			break;
		set = fdt->close_on_exec[i];
		if (!set)
			continue;
		fdt->close_on_exec[i] = 0;
		for ( ; set ; fd++, set >>= 1) {
			struct file *file;
			if (!(set & 1))
				continue;
			file = fdt->fd[fd];
			if (!file)
				continue;
			rcu_assign_pointer(fdt->fd[fd], NULL);
			__put_unused_fd(files, fd);
			spin_unlock(&files->file_lock);
			filp_close(file, files);
			cond_resched();
			spin_lock(&files->file_lock);
		}
	}
	spin_unlock(&files->file_lock);
}

static struct file *__fget(unsigned int fd, fmode_t mask)
{
	struct files_struct *files = current->files;
	struct file *file;
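
	/*
	 * Lockless lookup: the fd table is only protected by RCU here, so
	 * the descriptor can be closed concurrently.  RCU keeps the struct
	 * file's memory around, but its refcount may already have hit zero,
	 * hence atomic_long_inc_not_zero() below rather than a plain get.
	 */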
	rcu_read_lock();
	file = fcheck_files(files, fd);
	if (file) {
		/* File object ref couldn't be taken */
		if ((file->f_mode & mask) ||
		    !atomic_long_inc_not_zero(&file->f_count))
			file = NULL;
	}
	rcu_read_unlock();

	return file;
}

struct file *fget(unsigned int fd)
{
	return __fget(fd, FMODE_PATH);
}
EXPORT_SYMBOL(fget);

struct file *fget_raw(unsigned int fd)
{
	return __fget(fd, 0);
}
EXPORT_SYMBOL(fget_raw);

/*
 * Lightweight file lookup - no refcnt increment if fd table isn't shared.
 *
 * You can use this instead of fget if you satisfy all of the following
 * conditions:
 * 1) You must call fput_light before exiting the syscall and returning control
 *    to userspace (i.e. you cannot remember the returned struct file * after
 *    returning to userspace).
 * 2) You must not call filp_close on the returned struct file * in between
 *    calls to fget_light and fput_light.
 * 3) You must not clone the current task in between the calls to fget_light
 *    and fput_light.
 *
 * The fput_needed flag returned by fget_light should be passed to the
 * corresponding fput_light.
 */
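
/*
 * Callers normally reach this through the fdget()/fdput() helpers in
 * <linux/file.h>, roughly like this (sketch):
 *
 *	struct fd f = fdget(fd);
 *	if (!f.file)
 *		return -EBADF;
 *	...use f.file...
 *	fdput(f);
 */
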
static unsigned long __fget_light(unsigned int fd, fmode_t mask)
{
	struct files_struct *files = current->files;
	struct file *file;

	if (atomic_read(&files->count) == 1) {
		file = __fcheck_files(files, fd);
		if (!file || unlikely(file->f_mode & mask))
			return 0;
		return (unsigned long)file;
	} else {
		file = __fget(fd, mask);
		if (!file)
			return 0;
		return FDPUT_FPUT | (unsigned long)file;
	}
}

unsigned long __fdget(unsigned int fd)
{
	return __fget_light(fd, FMODE_PATH);
}
EXPORT_SYMBOL(__fdget);

unsigned long __fdget_raw(unsigned int fd)
{
	return __fget_light(fd, 0);
}

unsigned long __fdget_pos(unsigned int fd)
{
	unsigned long v = __fdget(fd);
	struct file *file = (struct file *)(v & ~3);

	if (file && (file->f_mode & FMODE_ATOMIC_POS)) {
		if (file_count(file) > 1) {
			v |= FDPUT_POS_UNLOCK;
			mutex_lock(&file->f_pos_lock);
		}
	}
	return v;
}

/*
 * We only lock f_pos if we have threads or if the file might be
 * shared with another process. In both cases we'll have an elevated
 * file count (done either by fdget() or by fork()).
 */
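
/*
 * The FDPUT_POS_UNLOCK bit set above is consumed by fdput_pos() in
 * <linux/file.h>, which drops f_pos_lock before doing the ordinary fdput().
 */
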
void set_close_on_exec(unsigned int fd, int flag)
{
	struct files_struct *files = current->files;
	struct fdtable *fdt;
	spin_lock(&files->file_lock);
	fdt = files_fdtable(files);
	if (flag)
		__set_close_on_exec(fd, fdt);
	else
		__clear_close_on_exec(fd, fdt);
	spin_unlock(&files->file_lock);
}

bool get_close_on_exec(unsigned int fd)
{
	struct files_struct *files = current->files;
	struct fdtable *fdt;
	bool res;
	rcu_read_lock();
	fdt = files_fdtable(files);
	res = close_on_exec(fd, fdt);
	rcu_read_unlock();
	return res;
}

static int do_dup2(struct files_struct *files,
	struct file *file, unsigned fd, unsigned flags)
__releases(&files->file_lock)
{
	struct file *tofree;
	struct fdtable *fdt;

	/*
	 * We need to detect attempts to do dup2() over allocated but still
	 * not finished descriptor.  NB: OpenBSD avoids that at the price of
	 * extra work in their equivalent of fget() - they insert struct
	 * file immediately after grabbing descriptor, mark it larval if
	 * more work (e.g. actual opening) is needed and make sure that
	 * fget() treats larval files as absent.  Potentially interesting,
	 * but while extra work in fget() is trivial, locking implications
	 * and amount of surgery on open()-related paths in VFS are not.
	 * FreeBSD fails with -EBADF in the same situation, NetBSD "solution"
	 * deadlocks in rather amusing ways, AFAICS.  All of that is out of
	 * scope of POSIX or SUS, since neither considers shared descriptor
	 * tables and this condition does not arise without those.
	 */
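	/*
	 * Locking contract: callers enter with files->file_lock held; we
	 * always return with it dropped, whether we succeed or hit Ebusy.
	 */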
	fdt = files_fdtable(files);
	tofree = fdt->fd[fd];
	if (!tofree && fd_is_open(fd, fdt))
		goto Ebusy;
	get_file(file);
	rcu_assign_pointer(fdt->fd[fd], file);
	__set_open_fd(fd, fdt);
	if (flags & O_CLOEXEC)
		__set_close_on_exec(fd, fdt);
	else
		__clear_close_on_exec(fd, fdt);
	spin_unlock(&files->file_lock);

	if (tofree)
		filp_close(tofree, files);

	return fd;

Ebusy:
	spin_unlock(&files->file_lock);
	return -EBUSY;
}

int replace_fd(unsigned fd, struct file *file, unsigned flags)
{
	int err;
	struct files_struct *files = current->files;

	if (!file)
		return __close_fd(files, fd);

	if (fd >= rlimit(RLIMIT_NOFILE))
		return -EBADF;

	spin_lock(&files->file_lock);
	err = expand_files(files, fd);
	if (unlikely(err < 0))
		goto out_unlock;
	return do_dup2(files, file, fd, flags);

out_unlock:
	spin_unlock(&files->file_lock);
	return err;
}

SYSCALL_DEFINE3(dup3, unsigned int, oldfd, unsigned int, newfd, int, flags)
{
	int err = -EBADF;
	struct file *file;
	struct files_struct *files = current->files;

	if ((flags & ~O_CLOEXEC) != 0)
		return -EINVAL;

	if (unlikely(oldfd == newfd))
		return -EINVAL;

	if (newfd >= rlimit(RLIMIT_NOFILE))
		return -EBADF;

	spin_lock(&files->file_lock);
	err = expand_files(files, newfd);
	file = fcheck(oldfd);
	if (unlikely(!file))
		goto Ebadf;
	if (unlikely(err < 0)) {
		if (err == -EMFILE)
			goto Ebadf;
		goto out_unlock;
	}
	return do_dup2(files, file, newfd, flags);

Ebadf:
	err = -EBADF;
out_unlock:
	spin_unlock(&files->file_lock);
	return err;
}

SYSCALL_DEFINE2(dup2, unsigned int, oldfd, unsigned int, newfd)
{
	if (unlikely(newfd == oldfd)) { /* corner case */
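		/*
		 * POSIX requires dup2(fd, fd) to return fd if it is a valid
		 * descriptor (and -EBADF otherwise) without closing anything,
		 * whereas dup3() rejects oldfd == newfd with -EINVAL.
		 */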
		struct files_struct *files = current->files;
		int retval = oldfd;

		rcu_read_lock();
		if (!fcheck_files(files, oldfd))
			retval = -EBADF;
		rcu_read_unlock();
		return retval;
	}
	return sys_dup3(oldfd, newfd, 0);
}

SYSCALL_DEFINE1(dup, unsigned int, fildes)
{
	int ret = -EBADF;
	struct file *file = fget_raw(fildes);

	if (file) {
		ret = get_unused_fd();
		if (ret >= 0)
			fd_install(ret, file);
		else
			fput(file);
	}
	return ret;
}

int f_dupfd(unsigned int from, struct file *file, unsigned flags)
{
	int err;
	if (from >= rlimit(RLIMIT_NOFILE))
		return -EINVAL;
	err = alloc_fd(from, flags);
	if (err >= 0) {
		get_file(file);
		fd_install(err, file);
	}
	return err;
}

int iterate_fd(struct files_struct *files, unsigned n,
		int (*f)(const void *, struct file *, unsigned),
		const void *p)
{
	struct fdtable *fdt;
	int res = 0;
	if (!files)
		return 0;
	spin_lock(&files->file_lock);
	for (fdt = files_fdtable(files); n < fdt->max_fds; n++) {
		struct file *file;
		file = rcu_dereference_check_fdtable(files, fdt->fd[n]);
		if (!file)
			continue;
		res = f(p, file, n);
		if (res)
			break;
	}
	spin_unlock(&files->file_lock);
	return res;
}
EXPORT_SYMBOL(iterate_fd);