/*
 *  linux/fs/fcntl.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

#include <linux/syscalls.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/dnotify.h>
#include <linux/smp_lock.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/security.h>
#include <linux/ptrace.h>
#include <linux/signal.h>

#include <asm/poll.h>
#include <asm/siginfo.h>
#include <asm/uaccess.h>
void fastcall set_close_on_exec(unsigned int fd, int flag)
{
        struct files_struct *files = current->files;
        struct fdtable *fdt;
        spin_lock(&files->file_lock);
        fdt = files_fdtable(files);
        if (flag)
                FD_SET(fd, fdt->close_on_exec);
        else
                FD_CLR(fd, fdt->close_on_exec);
        spin_unlock(&files->file_lock);
}
static inline int get_close_on_exec(unsigned int fd)
{
        struct files_struct *files = current->files;
        struct fdtable *fdt;
        int res;
        spin_lock(&files->file_lock);
        fdt = files_fdtable(files);
        res = FD_ISSET(fd, fdt->close_on_exec);
        spin_unlock(&files->file_lock);
        return res;
}
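/*
 * Illustrative sketch, not part of the original file: the userspace
 * F_GETFD/F_SETFD pattern that lands in the two helpers above, used to
 * mark a descriptor close-on-exec.  Kept under #if 0 so it is never
 * built into the kernel; as ordinary userspace C it would need
 * <fcntl.h>.  set_cloexec() is a made-up name for the example.
 */
#if 0
static int set_cloexec(int fd)
{
        int flags = fcntl(fd, F_GETFD);         /* read current fd flags */

        if (flags < 0)
                return -1;
        /* FD_CLOEXEC is the only flag defined for F_SETFD */
        return fcntl(fd, F_SETFD, flags | FD_CLOEXEC);
}
#endif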
/*
 * locate_fd finds a free file descriptor in the open_fds fdset,
 * expanding the fd arrays if necessary.  Must be called with the
 * file_lock held for write.
 */

static int locate_fd(struct files_struct *files,
                            struct file *file, unsigned int orig_start)
{
        unsigned int newfd;
        unsigned int start;
        int error;
        struct fdtable *fdt;

        error = -EINVAL;
        if (orig_start >= current->signal->rlim[RLIMIT_NOFILE].rlim_cur)
                goto out;
repeat:
        fdt = files_fdtable(files);
        /*
         * Someone might have closed fd's in the range
         * orig_start..fdt->next_fd
         */
        start = orig_start;
        if (start < fdt->next_fd)
                start = fdt->next_fd;

        newfd = start;
        if (start < fdt->max_fdset) {
                newfd = find_next_zero_bit(fdt->open_fds->fds_bits,
                        fdt->max_fdset, start);
        }

        error = -EMFILE;
        if (newfd >= current->signal->rlim[RLIMIT_NOFILE].rlim_cur)
                goto out;

        error = expand_files(files, newfd);
        if (error < 0)
                goto out;

        /*
         * If we needed to expand the fs array we
         * might have blocked - try again.
         */
        if (error)
                goto repeat;

        if (start <= fdt->next_fd)
                fdt->next_fd = newfd + 1;

        error = newfd;
out:
        return error;
}
static int dupfd(struct file *file, unsigned int start)
{
        struct files_struct * files = current->files;
        struct fdtable *fdt;
        int fd;

        spin_lock(&files->file_lock);
        fd = locate_fd(files, file, start);
        if (fd >= 0) {
                /* locate_fd() may have expanded fdtable, load the ptr */
                fdt = files_fdtable(files);
                FD_SET(fd, fdt->open_fds);
                FD_CLR(fd, fdt->close_on_exec);
                spin_unlock(&files->file_lock);
                fd_install(fd, file);
        } else {
                spin_unlock(&files->file_lock);
                fput(file);
        }

        return fd;
}
asmlinkage long sys_dup2(unsigned int oldfd, unsigned int newfd)
{
        int err = -EBADF;
        struct file * file, *tofree;
        struct files_struct * files = current->files;
        struct fdtable *fdt;

        spin_lock(&files->file_lock);
        if (!(file = fcheck(oldfd)))
                goto out_unlock;
        err = newfd;
        if (newfd == oldfd)
                goto out_unlock;
        err = -EBADF;
        if (newfd >= current->signal->rlim[RLIMIT_NOFILE].rlim_cur)
                goto out_unlock;
        get_file(file);                 /* We are now finished with oldfd */

        err = expand_files(files, newfd);
        if (err < 0)
                goto out_fput;

        /* To avoid races with open() and dup(), we will mark the fd as
         * in-use in the open-file bitmap throughout the entire dup2()
         * process.  This is quite safe: do_close() uses the fd array
         * entry, not the bitmap, to decide what work needs to be
         * done.  --sct */
        /* Doesn't work. open() might be there first. --AV */

        /* Yes. It's a race. In user space. Nothing sane to do */
        err = -EBUSY;
        fdt = files_fdtable(files);
        tofree = fdt->fd[newfd];
        if (!tofree && FD_ISSET(newfd, fdt->open_fds))
                goto out_fput;

        fdt->fd[newfd] = file;
        FD_SET(newfd, fdt->open_fds);
        FD_CLR(newfd, fdt->close_on_exec);
        spin_unlock(&files->file_lock);

        if (tofree)
                filp_close(tofree, files);
        err = newfd;
out:
        return err;
out_unlock:
        spin_unlock(&files->file_lock);
        goto out;

out_fput:
        spin_unlock(&files->file_lock);
        fput(file);
        goto out;
}
asmlinkage long sys_dup(unsigned int fildes)
{
        int ret = -EBADF;
        struct file * file = fget(fildes);

        if (file)
                ret = dupfd(file, 0);
        return ret;
}
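/*
 * Illustrative sketch, not part of the original file: the classic
 * userspace use of the sys_dup2() path above - redirecting stdout to a
 * log file.  Kept under #if 0; as userspace C it would need <fcntl.h>
 * and <unistd.h>.  redirect_stdout() is a made-up name for the example.
 */
#if 0
static int redirect_stdout(const char *path)
{
        int fd = open(path, O_WRONLY | O_CREAT | O_APPEND, 0644);

        if (fd < 0)
                return -1;
        /* close fd 1 if open, then make it refer to the same open file */
        if (dup2(fd, STDOUT_FILENO) < 0) {
                close(fd);
                return -1;
        }
        close(fd);      /* the duplicate keeps the file open */
        return 0;
}
#endif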
#define SETFL_MASK (O_APPEND | O_NONBLOCK | O_NDELAY | FASYNC | O_DIRECT | O_NOATIME)
static int setfl(int fd, struct file * filp, unsigned long arg)
{
        struct inode * inode = filp->f_dentry->d_inode;
        int error = 0;

        /* O_APPEND cannot be cleared if the file is marked as append-only */
        if (!(arg & O_APPEND) && IS_APPEND(inode))
                return -EPERM;

        /* O_NOATIME can only be set by the owner or superuser */
        if ((arg & O_NOATIME) && !(filp->f_flags & O_NOATIME))
                if (current->fsuid != inode->i_uid && !capable(CAP_FOWNER))
                        return -EPERM;

        /* required for strict SunOS emulation */
        if (O_NONBLOCK != O_NDELAY)
                if (arg & O_NDELAY)
                        arg |= O_NONBLOCK;

        if (arg & O_DIRECT) {
                if (!filp->f_mapping || !filp->f_mapping->a_ops ||
                    !filp->f_mapping->a_ops->direct_IO)
                        return -EINVAL;
        }

        if (filp->f_op && filp->f_op->check_flags)
                error = filp->f_op->check_flags(arg);
        if (error)
                return error;

        lock_kernel();
        if ((arg ^ filp->f_flags) & FASYNC) {
                if (filp->f_op && filp->f_op->fasync) {
                        error = filp->f_op->fasync(fd, filp, (arg & FASYNC) != 0);
                        if (error < 0)
                                goto out;
                }
        }

        filp->f_flags = (arg & SETFL_MASK) | (filp->f_flags & ~SETFL_MASK);
out:
        unlock_kernel();
        return error;
}
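/*
 * Illustrative sketch, not part of the original file: the userspace
 * F_GETFL/F_SETFL round trip that ends up in setfl() above, here used
 * to enable non-blocking I/O.  Kept under #if 0; needs <fcntl.h> as
 * ordinary userspace C.  set_nonblocking() is a made-up name.
 */
#if 0
static int set_nonblocking(int fd)
{
        int flags = fcntl(fd, F_GETFL);         /* read current status flags */

        if (flags < 0)
                return -1;
        /* only the bits in SETFL_MASK (O_APPEND, O_NONBLOCK, ...) change */
        return fcntl(fd, F_SETFL, flags | O_NONBLOCK);
}
#endif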
static void f_modown(struct file *filp, unsigned long pid,
                     uid_t uid, uid_t euid, int force)
{
        write_lock_irq(&filp->f_owner.lock);
        if (force || !filp->f_owner.pid) {
                filp->f_owner.pid = pid;
                filp->f_owner.uid = uid;
                filp->f_owner.euid = euid;
        }
        write_unlock_irq(&filp->f_owner.lock);
}
int f_setown(struct file *filp, unsigned long arg, int force)
{
        int err;

        err = security_file_set_fowner(filp);
        if (err)
                return err;

        f_modown(filp, arg, current->uid, current->euid, force);
        return 0;
}

EXPORT_SYMBOL(f_setown);
void f_delown(struct file *filp)
{
        f_modown(filp, 0, 0, 0, 1);
}
static long do_fcntl(int fd, unsigned int cmd, unsigned long arg,
                struct file *filp)
{
        long err = -EINVAL;

        switch (cmd) {
        case F_DUPFD:
                get_file(filp);
                err = dupfd(filp, arg);
                break;
        case F_GETFD:
                err = get_close_on_exec(fd) ? FD_CLOEXEC : 0;
                break;
        case F_SETFD:
                err = 0;
                set_close_on_exec(fd, arg & FD_CLOEXEC);
                break;
        case F_GETFL:
                err = filp->f_flags;
                break;
        case F_SETFL:
                err = setfl(fd, filp, arg);
                break;
        case F_GETLK:
                err = fcntl_getlk(filp, (struct flock __user *) arg);
                break;
        case F_SETLK:
        case F_SETLKW:
                err = fcntl_setlk(fd, filp, cmd, (struct flock __user *) arg);
                break;
        case F_GETOWN:
                /*
                 * XXX If f_owner is a process group, the
                 * negative return value will get converted
                 * into an error.  Oops.  If we keep the
                 * current syscall conventions, the only way
                 * to fix this will be in libc.
                 */
                err = filp->f_owner.pid;
                force_successful_syscall_return();
                break;
        case F_SETOWN:
                err = f_setown(filp, arg, 1);
                break;
        case F_GETSIG:
                err = filp->f_owner.signum;
                break;
        case F_SETSIG:
                /* arg == 0 restores default behaviour. */
                if (!valid_signal(arg)) {
                        break;
                }
                err = 0;
                filp->f_owner.signum = arg;
                break;
        case F_GETLEASE:
                err = fcntl_getlease(filp);
                break;
        case F_SETLEASE:
                err = fcntl_setlease(fd, filp, arg);
                break;
        case F_NOTIFY:
                err = fcntl_dirnotify(fd, filp, arg);
                break;
        default:
                break;
        }
        return err;
}
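/*
 * Illustrative sketch, not part of the original file: the userspace
 * side of the F_SETOWN/F_SETSIG/F_SETFL commands handled above,
 * requesting a queued realtime signal (with the ready fd in siginfo)
 * instead of plain SIGIO.  Kept under #if 0; as userspace C it needs
 * <fcntl.h>, <signal.h> and <unistd.h>, and F_SETSIG requires
 * _GNU_SOURCE.  io_ready() and arm_async_io() are made-up names, and
 * SIGRTMIN is just one possible signal choice.
 */
#if 0
static void io_ready(int sig, siginfo_t *si, void *ctx)
{
        /* si->si_fd and si->si_band are filled in by send_sigio_to_task() */
}

static int arm_async_io(int fd)
{
        struct sigaction sa;
        int flags;

        memset(&sa, 0, sizeof(sa));
        sa.sa_sigaction = io_ready;
        sa.sa_flags = SA_SIGINFO;
        sigemptyset(&sa.sa_mask);
        if (sigaction(SIGRTMIN, &sa, NULL) < 0)
                return -1;
        if (fcntl(fd, F_SETOWN, getpid()) < 0)  /* who gets the signal */
                return -1;
        if (fcntl(fd, F_SETSIG, SIGRTMIN) < 0)  /* which signal to queue */
                return -1;
        flags = fcntl(fd, F_GETFL);
        if (flags < 0)
                return -1;
        /* O_ASYNC sets FASYNC; setfl() then calls the ->fasync() method */
        return fcntl(fd, F_SETFL, flags | O_ASYNC);
}
#endif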
asmlinkage long sys_fcntl(unsigned int fd, unsigned int cmd, unsigned long arg)
{
        struct file *filp;
        long err = -EBADF;

        filp = fget(fd);
        if (!filp)
                goto out;

        err = security_file_fcntl(filp, cmd, arg);
        if (err) {
                fput(filp);
                return err;
        }

        err = do_fcntl(fd, cmd, arg, filp);

        fput(filp);
out:
        return err;
}
#if BITS_PER_LONG == 32
asmlinkage long sys_fcntl64(unsigned int fd, unsigned int cmd, unsigned long arg)
{
        struct file * filp;
        long err;

        err = -EBADF;
        filp = fget(fd);
        if (!filp)
                goto out;

        err = security_file_fcntl(filp, cmd, arg);
        if (err) {
                fput(filp);
                return err;
        }
        err = -EBADF;

        switch (cmd) {
        case F_GETLK64:
                err = fcntl_getlk64(filp, (struct flock64 __user *) arg);
                break;
        case F_SETLK64:
        case F_SETLKW64:
                err = fcntl_setlk64(fd, filp, cmd,
                                (struct flock64 __user *) arg);
                break;
        default:
                err = do_fcntl(fd, cmd, arg, filp);
                break;
        }
        fput(filp);
out:
        return err;
}
#endif
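/*
 * Illustrative sketch, not part of the original file: the userspace
 * struct flock usage that reaches fcntl_setlk()/fcntl_setlk64() above -
 * taking an exclusive lock over a whole file.  Kept under #if 0; needs
 * <fcntl.h> and <unistd.h> as ordinary userspace C.  lock_whole_file()
 * is a made-up name for the example.
 */
#if 0
static int lock_whole_file(int fd)
{
        struct flock fl;

        fl.l_type   = F_WRLCK;          /* exclusive (write) lock */
        fl.l_whence = SEEK_SET;
        fl.l_start  = 0;
        fl.l_len    = 0;                /* 0 means "to end of file" */

        /* F_SETLKW blocks until the lock can be granted */
        return fcntl(fd, F_SETLKW, &fl);
}
#endif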
/* Table to convert sigio signal codes into poll band bitmaps */

static long band_table[NSIGPOLL] = {
        POLLIN | POLLRDNORM,                    /* POLL_IN */
        POLLOUT | POLLWRNORM | POLLWRBAND,      /* POLL_OUT */
        POLLIN | POLLRDNORM | POLLMSG,          /* POLL_MSG */
        POLLERR,                                /* POLL_ERR */
        POLLPRI | POLLRDBAND,                   /* POLL_PRI */
        POLLHUP | POLLERR                       /* POLL_HUP */
};
static inline int sigio_perm(struct task_struct *p,
                             struct fown_struct *fown, int sig)
{
        return (((fown->euid == 0) ||
                 (fown->euid == p->suid) || (fown->euid == p->uid) ||
                 (fown->uid == p->suid) || (fown->uid == p->uid)) &&
                !security_file_send_sigiotask(p, fown, sig));
}
static void send_sigio_to_task(struct task_struct *p,
                               struct fown_struct *fown,
                               int fd,
                               int reason)
{
        if (!sigio_perm(p, fown, fown->signum))
                return;

        switch (fown->signum) {
                siginfo_t si;
                default:
                        /* Queue a rt signal with the appropriate fd as its
                           value.  We use SI_SIGIO as the source, not
                           SI_KERNEL, since kernel signals always get
                           delivered even if we can't queue.  Failure to
                           queue in this case _should_ be reported; we fall
                           back to SIGIO in that case. --sct */
                        si.si_signo = fown->signum;
                        si.si_errno = 0;
                        si.si_code  = reason;
                        /* Make sure we are called with one of the POLL_*
                           reasons, otherwise we could leak kernel stack into
                           userspace.  */
                        if ((reason & __SI_MASK) != __SI_POLL)
                                BUG();
                        if (reason - POLL_IN >= NSIGPOLL)
                                si.si_band  = ~0L;
                        else
                                si.si_band = band_table[reason - POLL_IN];
                        si.si_fd    = fd;
                        if (!send_group_sig_info(fown->signum, &si, p))
                                break;
                /* fall-through: fall back on the old plain SIGIO signal */
                case 0:
                        send_group_sig_info(SIGIO, SEND_SIG_PRIV, p);
        }
}
void send_sigio(struct fown_struct *fown, int fd, int band)
{
        struct task_struct *p;
        int pid;

        read_lock(&fown->lock);
        pid = fown->pid;
        if (!pid)
                goto out_unlock_fown;

        read_lock(&tasklist_lock);
        if (pid > 0) {
                p = find_task_by_pid(pid);
                if (p) {
                        send_sigio_to_task(p, fown, fd, band);
                }
        } else {
                do_each_task_pid(-pid, PIDTYPE_PGID, p) {
                        send_sigio_to_task(p, fown, fd, band);
                } while_each_task_pid(-pid, PIDTYPE_PGID, p);
        }
        read_unlock(&tasklist_lock);
 out_unlock_fown:
        read_unlock(&fown->lock);
}
static void send_sigurg_to_task(struct task_struct *p,
                                struct fown_struct *fown)
{
        if (sigio_perm(p, fown, SIGURG))
                send_group_sig_info(SIGURG, SEND_SIG_PRIV, p);
}
int send_sigurg(struct fown_struct *fown)
{
        struct task_struct *p;
        int pid, ret = 0;

        read_lock(&fown->lock);
        pid = fown->pid;
        if (!pid)
                goto out_unlock_fown;

        ret = 1;

        read_lock(&tasklist_lock);
        if (pid > 0) {
                p = find_task_by_pid(pid);
                if (p) {
                        send_sigurg_to_task(p, fown);
                }
        } else {
                do_each_task_pid(-pid, PIDTYPE_PGID, p) {
                        send_sigurg_to_task(p, fown);
                } while_each_task_pid(-pid, PIDTYPE_PGID, p);
        }
        read_unlock(&tasklist_lock);
 out_unlock_fown:
        read_unlock(&fown->lock);
        return ret;
}
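/*
 * Illustrative sketch, not part of the original file: how a userspace
 * program arranges to receive the SIGURG that send_sigurg() delivers
 * when out-of-band data arrives on a socket.  Kept under #if 0; as
 * userspace C it needs <fcntl.h>, <signal.h>, <unistd.h> and
 * <sys/socket.h>.  on_urgent() and watch_oob() are made-up names.
 */
#if 0
static void on_urgent(int sig)
{
        /* typically followed by recv(sock, &byte, 1, MSG_OOB) elsewhere */
}

static int watch_oob(int sock)
{
        if (signal(SIGURG, on_urgent) == SIG_ERR)
                return -1;
        /* F_SETOWN records which process send_sigurg()/send_sigio() target */
        return fcntl(sock, F_SETOWN, getpid());
}
#endif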
static DEFINE_RWLOCK(fasync_lock);
static kmem_cache_t *fasync_cache;
/*
 * fasync_helper() is used by some character device drivers (mainly mice)
 * to set up the fasync queue. It returns negative on error, 0 if it did
 * no changes and positive if it added/deleted the entry.
 */
int fasync_helper(int fd, struct file * filp, int on, struct fasync_struct **fapp)
{
        struct fasync_struct *fa, **fp;
        struct fasync_struct *new = NULL;
        int result = 0;

        if (on) {
                new = kmem_cache_alloc(fasync_cache, SLAB_KERNEL);
                if (!new)
                        return -ENOMEM;
        }
        write_lock_irq(&fasync_lock);
        for (fp = fapp; (fa = *fp) != NULL; fp = &fa->fa_next) {
                if (fa->fa_file == filp) {
                        if (on) {
                                fa->fa_fd = fd;
                                kmem_cache_free(fasync_cache, new);
                        } else {
                                *fp = fa->fa_next;
                                kmem_cache_free(fasync_cache, fa);
                                result = 1;
                        }
                        goto out;
                }
        }

        if (on) {
                new->magic = FASYNC_MAGIC;
                new->fa_file = filp;
                new->fa_fd = fd;
                new->fa_next = *fapp;
                *fapp = new;
                result = 1;
        }
out:
        write_unlock_irq(&fasync_lock);
        return result;
}

EXPORT_SYMBOL(fasync_helper);
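/*
 * Illustrative sketch, not part of the original file: the usual way a
 * character driver wires fasync_helper() and kill_fasync() together.
 * struct my_dev, my_fasync() and my_data_ready() are made-up names;
 * only fasync_helper(), kill_fasync() and the file_operations ->fasync
 * hook are real interfaces.  Kept under #if 0 so it is never built.
 */
#if 0
struct my_dev {
        struct fasync_struct *async_queue;      /* readers who asked for SIGIO */
};

/* ->fasync() method, invoked from setfl() when the FASYNC bit changes */
static int my_fasync(int fd, struct file *filp, int on)
{
        struct my_dev *dev = filp->private_data;

        return fasync_helper(fd, filp, on, &dev->async_queue);
}

/* called by the driver whenever new data becomes readable */
static void my_data_ready(struct my_dev *dev)
{
        kill_fasync(&dev->async_queue, SIGIO, POLL_IN);
}
#endif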
void __kill_fasync(struct fasync_struct *fa, int sig, int band)
{
        while (fa) {
                struct fown_struct * fown;
                if (fa->magic != FASYNC_MAGIC) {
                        printk(KERN_ERR "kill_fasync: bad magic number in "
                               "fasync_struct!\n");
                        return;
                }
                fown = &fa->fa_file->f_owner;
                /* Don't send SIGURG to processes which have not set a
                   queued signum: SIGURG has its own default signalling
                   mechanism. */
                if (!(sig == SIGURG && fown->signum == 0))
                        send_sigio(fown, fa->fa_fd, band);
                fa = fa->fa_next;
        }
}

EXPORT_SYMBOL(__kill_fasync);
void kill_fasync(struct fasync_struct **fp, int sig, int band)
{
        /* First a quick test without locking: usually
         * the list is empty.
         */
        if (*fp) {
                read_lock(&fasync_lock);
                /* reread *fp after obtaining the lock */
                __kill_fasync(*fp, sig, band);
                read_unlock(&fasync_lock);
        }
}
EXPORT_SYMBOL(kill_fasync);
static int __init fasync_init(void)
{
        fasync_cache = kmem_cache_create("fasync_cache",
                sizeof(struct fasync_struct), 0, SLAB_PANIC, NULL, NULL);
        return 0;
}

module_init(fasync_init)