#include <linux/slab.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/mm.h>
#include <linux/stat.h>
#include <linux/fcntl.h>
#include <linux/swap.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/perf_event.h>
#include <linux/highmem.h>
#include <linux/spinlock.h>
#include <linux/key.h>
#include <linux/personality.h>
#include <linux/binfmts.h>
#include <linux/coredump.h>
#include <linux/utsname.h>
#include <linux/pid_namespace.h>
#include <linux/module.h>
#include <linux/namei.h>
#include <linux/mount.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/tsacct_kern.h>
#include <linux/cn_proc.h>
#include <linux/audit.h>
#include <linux/tracehook.h>
#include <linux/kmod.h>
#include <linux/fsnotify.h>
#include <linux/fs_struct.h>
#include <linux/pipe_fs_i.h>
#include <linux/oom.h>
#include <linux/compat.h>
#include <linux/timekeeping.h>

#include <asm/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/tlb.h>
#include <asm/exec.h>

#include <trace/events/task.h>
#include "internal.h"

#include <trace/events/sched.h>

int core_uses_pid;
unsigned int core_pipe_limit;
char core_pattern[CORENAME_MAX_SIZE] = "core";
static int core_name_size = CORENAME_MAX_SIZE;

struct core_name {
	char *corename;
	int used, size;
};

/* The maximal length of core_pattern is also specified in sysctl.c */
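
/*
 * For illustration (hypothetical values): with core_pattern set to
 * "/tmp/core.%e.%p", a crash of a task named "myapp" with TGID 1234
 * expands to "/tmp/core.myapp.1234". A pattern whose first character
 * is '|' is instead treated as a user-space helper command line; see
 * format_corename() and do_coredump() below.
 */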

static int expand_corename(struct core_name *cn, int size)
{
	char *corename = krealloc(cn->corename, size, GFP_KERNEL);

	if (!corename)
		return -ENOMEM;

	if (size > core_name_size) /* racy but harmless */
		core_name_size = size;

	cn->size = ksize(corename);
	cn->corename = corename;
	return 0;
}
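
/*
 * Note: ksize() reports the size of the underlying slab allocation,
 * which may exceed the requested size, so cn->size records the space
 * that is actually usable before another krealloc() is needed.
 */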

static __printf(2, 0) int cn_vprintf(struct core_name *cn, const char *fmt,
				     va_list arg)
{
	int free, need;
	va_list arg_copy;
again:
	free = cn->size - cn->used;
	va_copy(arg_copy, arg);
	need = vsnprintf(cn->corename + cn->used, free, fmt, arg_copy);
	va_end(arg_copy);
	if (need < free) {
		cn->used += need;
		return 0;
	}
	if (!expand_corename(cn, cn->size + need - free + 1))
		goto again;
	return -ENOMEM;
}
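
/*
 * vsnprintf() returns the length the output would have had given
 * unlimited space, so when the buffer is too small (need >= free)
 * the name can be regrown to the exact required size and the format
 * pass retried from the preserved va_list copy.
 */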

static __printf(2, 3) int cn_printf(struct core_name *cn, const char *fmt, ...)
{
	va_list arg;
	int ret;

	va_start(arg, fmt);
	ret = cn_vprintf(cn, fmt, arg);
	va_end(arg);

	return ret;
}

static __printf(2, 3)
int cn_esc_printf(struct core_name *cn, const char *fmt, ...)
{
	int cur = cn->used;
	va_list arg;
	int ret;

	va_start(arg, fmt);
	ret = cn_vprintf(cn, fmt, arg);
	va_end(arg);

	for (; cur < cn->used; ++cur) {
		if (cn->corename[cur] == '/')
			cn->corename[cur] = '!';
	}
	return ret;
}
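
/*
 * cn_esc_printf() is used for expansions whose value is not under the
 * administrator's control (task comm, hostname, executable path): any
 * '/' in the freshly appended bytes is rewritten to '!' so it cannot
 * smuggle extra path components into the core file name.
 */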

static int cn_print_exe_file(struct core_name *cn)
{
	struct file *exe_file;
	char *pathbuf, *path;
	int ret;

	exe_file = get_mm_exe_file(current->mm);
	if (!exe_file)
		return cn_esc_printf(cn, "%s (path unknown)", current->comm);

	pathbuf = kmalloc(PATH_MAX, GFP_TEMPORARY);
	if (!pathbuf) {
		ret = -ENOMEM;
		goto put_exe_file;
	}

	path = file_path(exe_file, pathbuf, PATH_MAX);
	if (IS_ERR(path)) {
		ret = PTR_ERR(path);
		goto free_buf;
	}

	ret = cn_esc_printf(cn, "%s", path);

free_buf:
	kfree(pathbuf);
put_exe_file:
	fput(exe_file);
	return ret;
}
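
/*
 * The free_buf/put_exe_file labels follow the usual kernel unwind
 * idiom: each failure path releases exactly what was acquired before
 * the failure point, first the path buffer, then the exe_file
 * reference taken by get_mm_exe_file().
 */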

/* format_corename will inspect the pattern parameter, and output a
 * name into corename, which must have space for at least
 * CORENAME_MAX_SIZE bytes plus one byte for the zero terminator.
 */
static int format_corename(struct core_name *cn, struct coredump_params *cprm)
{
	const struct cred *cred = current_cred();
	const char *pat_ptr = core_pattern;
	int ispipe = (*pat_ptr == '|');
	int pid_in_pattern = 0;
	int err = 0;

	cn->used = 0;
	cn->corename = NULL;
	if (expand_corename(cn, core_name_size))
		return -ENOMEM;
	cn->corename[0] = '\0';

	if (ispipe)
		++pat_ptr;

	/* Repeat as long as we have more pattern to process and more output
	   space */
	while (*pat_ptr) {
		if (*pat_ptr != '%') {
			err = cn_printf(cn, "%c", *pat_ptr++);
		} else {
			switch (*++pat_ptr) {
			/* single % at the end, drop that */
			case 0:
				goto out;
			/* Double percent, output one percent */
			case '%':
				err = cn_printf(cn, "%c", '%');
				break;
			/* pid */
			case 'p':
				pid_in_pattern = 1;
				err = cn_printf(cn, "%d",
						task_tgid_vnr(current));
				break;
			/* global pid */
			case 'P':
				err = cn_printf(cn, "%d",
						task_tgid_nr(current));
				break;
			case 'i':
				err = cn_printf(cn, "%d",
						task_pid_vnr(current));
				break;
			case 'I':
				err = cn_printf(cn, "%d",
						task_pid_nr(current));
				break;
			/* uid */
			case 'u':
				err = cn_printf(cn, "%u",
						from_kuid(&init_user_ns,
							  cred->uid));
				break;
			/* gid */
			case 'g':
				err = cn_printf(cn, "%u",
						from_kgid(&init_user_ns,
							  cred->gid));
				break;
			case 'd':
				err = cn_printf(cn, "%d",
					__get_dumpable(cprm->mm_flags));
				break;
			/* signal that caused the coredump */
			case 's':
				err = cn_printf(cn, "%d",
						cprm->siginfo->si_signo);
				break;
			/* UNIX time of coredump */
			case 't': {
				time64_t time;

				time = ktime_get_real_seconds();
				err = cn_printf(cn, "%lld", time);
				break;
			}
			/* hostname */
			case 'h':
				down_read(&uts_sem);
				err = cn_esc_printf(cn, "%s",
						utsname()->nodename);
				up_read(&uts_sem);
				break;
			/* executable */
			case 'e':
				err = cn_esc_printf(cn, "%s", current->comm);
				break;
			case 'E':
				err = cn_print_exe_file(cn);
				break;
			/* core limit size */
			case 'c':
				err = cn_printf(cn, "%lu",
						rlimit(RLIMIT_CORE));
				break;
			default:
				break;
			}
			++pat_ptr;
		}

		if (err)
			return err;
	}

out:
	/* Backward compatibility with core_uses_pid:
	 *
	 * If core_pattern does not include a %p (as is the default)
	 * and core_uses_pid is set, then .%pid will be appended to
	 * the filename. Do not do this for piped commands. */
	if (!ispipe && !pid_in_pattern && core_uses_pid) {
		err = cn_printf(cn, ".%d", task_tgid_vnr(current));
		if (err)
			return err;
	}
	return ispipe;
}
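
/*
 * The return value doubles as the pipe flag: 0 means "plain file
 * name", 1 means "run the expanded name as a helper via a pipe", and
 * a negative value is an errno-style failure, which is why
 * do_coredump() below checks "if (ispipe)" first and "if (ispipe < 0)"
 * inside that branch.
 */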

static int zap_process(struct task_struct *start, int exit_code)
{
	struct task_struct *t;
	int nr = 0;

	start->signal->group_exit_code = exit_code;
	start->signal->group_stop_count = 0;

	t = start;
	do {
		task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
		if (t != current && t->mm) {
			sigaddset(&t->pending.signal, SIGKILL);
			signal_wake_up(t, 1);
			nr++;
		}
	} while_each_thread(start, t);

	return nr;
}
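
/*
 * zap_process() returns the number of threads, other than the dumper
 * itself, that were sent SIGKILL and still own the mm; this count
 * feeds core_state->nr_threads so that coredump_wait() knows how many
 * tasks must check in via exit_mm() before the dump may proceed.
 */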

static int zap_threads(struct task_struct *tsk, struct mm_struct *mm,
			struct core_state *core_state, int exit_code)
{
	struct task_struct *g, *p;
	unsigned long flags;
	int nr = -EAGAIN;

	spin_lock_irq(&tsk->sighand->siglock);
	if (!signal_group_exit(tsk->signal)) {
		mm->core_state = core_state;
		nr = zap_process(tsk, exit_code);
		tsk->signal->group_exit_task = tsk;
		/* ignore all signals except SIGKILL, see prepare_signal() */
		tsk->signal->flags = SIGNAL_GROUP_COREDUMP;
		clear_tsk_thread_flag(tsk, TIF_SIGPENDING);
	}
	spin_unlock_irq(&tsk->sighand->siglock);
	if (unlikely(nr < 0))
		return nr;

	tsk->flags |= PF_DUMPCORE;
	if (atomic_read(&mm->mm_users) == nr + 1)
		goto done;
	/*
	 * We should find and kill all tasks which use this mm, and we should
	 * count them correctly into ->nr_threads. We don't take tasklist
	 * lock, but this is safe wrt:
	 *
	 * fork:
	 *	None of sub-threads can fork after zap_process(leader). All
	 *	processes which were created before this point should be
	 *	visible to zap_threads() because copy_process() adds the new
	 *	process to the tail of init_task.tasks list, and lock/unlock
	 *	of ->siglock provides a memory barrier.
	 *
	 * exit:
	 *	The caller holds mm->mmap_sem. This means that the task which
	 *	uses this mm can't pass exit_mm(), so it can't exit or clear
	 *	its ->mm.
	 *
	 * de_thread:
	 *	It does list_replace_rcu(&leader->tasks, &current->tasks),
	 *	we must see either old or new leader, this does not matter.
	 *	However, it can change p->sighand, so lock_task_sighand(p)
	 *	must be used. Since p->mm != NULL and we hold ->mmap_sem
	 *	it can't fail.
	 *
	 *	Note also that "g" can be the old leader with ->mm == NULL
	 *	and already unhashed and thus removed from ->thread_group.
	 *	This is OK, __unhash_process()->list_del_rcu() does not
	 *	clear the ->next pointer, we will find the new leader via
	 *	next_thread().
	 */
	rcu_read_lock();
	for_each_process(g) {
		if (g == tsk->group_leader)
			continue;
		if (g->flags & PF_KTHREAD)
			continue;
		p = g;
		do {
			if (p->mm) {
				if (unlikely(p->mm == mm)) {
					lock_task_sighand(p, &flags);
					nr += zap_process(p, exit_code);
					p->signal->flags = SIGNAL_GROUP_EXIT;
					unlock_task_sighand(p, &flags);
				}
				break;
			}
		} while_each_thread(g, p);
	}
	rcu_read_unlock();
done:
	atomic_set(&core_state->nr_threads, nr);
	return nr;
}
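
/*
 * A positive return value is the number of tasks, other than the
 * dumper, that share this mm; each of them decrements
 * core_state->nr_threads on its way into exit_mm(), and the last one
 * to arrive completes core_state->startup.
 */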

static int coredump_wait(int exit_code, struct core_state *core_state)
{
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;
	int core_waiters = -EBUSY;

	init_completion(&core_state->startup);
	core_state->dumper.task = tsk;
	core_state->dumper.next = NULL;

	down_write(&mm->mmap_sem);
	if (!mm->core_state)
		core_waiters = zap_threads(tsk, mm, core_state, exit_code);
	up_write(&mm->mmap_sem);

	if (core_waiters > 0) {
		struct core_thread *ptr;

		wait_for_completion(&core_state->startup);
		/*
		 * Wait for all the threads to become inactive, so that
		 * all the thread context (extended register state, like
		 * fpu etc) gets copied to the memory.
		 */
		ptr = core_state->dumper.next;
		while (ptr != NULL) {
			wait_task_inactive(ptr->task, 0);
			ptr = ptr->next;
		}
	}

	return core_waiters;
}
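
/*
 * wait_for_completion() above only guarantees that every sharer has
 * entered exit_mm(); the wait_task_inactive() loop additionally waits
 * until each one is off the CPU, so registers and extended FPU state
 * have really been written back before the dumper reads them.
 */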

static void coredump_finish(struct mm_struct *mm, bool core_dumped)
{
	struct core_thread *curr, *next;
	struct task_struct *task;

	spin_lock_irq(&current->sighand->siglock);
	if (core_dumped && !__fatal_signal_pending(current))
		current->signal->group_exit_code |= 0x80;
	current->signal->group_exit_task = NULL;
	current->signal->flags = SIGNAL_GROUP_EXIT;
	spin_unlock_irq(&current->sighand->siglock);

	next = mm->core_state->dumper.next;
	while ((curr = next) != NULL) {
		next = curr->next;
		task = curr->task;
		/*
		 * see exit_mm(), curr->task must not see
		 * ->task == NULL before we read ->next.
		 */
		smp_mb();
		curr->task = NULL;
		wake_up_process(task);
	}

	mm->core_state = NULL;
}
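
/*
 * The 0x80 OR-ed into group_exit_code above is the bit that wait()
 * reports through WCOREDUMP(): it is only set when the dump was
 * actually written, not when a fresh fatal signal aborted it.
 */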

static bool dump_interrupted(void)
{
	/*
	 * SIGKILL or freezing() interrupt the coredumping. Perhaps we
	 * can do try_to_freeze() and check __fatal_signal_pending(),
	 * but then we need to teach dump_write() to restart and clear
	 * TIF_SIGPENDING.
	 */
	return signal_pending(current);
}
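
/*
 * zap_threads() cleared TIF_SIGPENDING for the dumper, so a pending
 * signal here can only be a fresh SIGKILL (or the freezer's fake
 * signal), both of which should abort the dump promptly.
 */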

static void wait_for_dump_helpers(struct file *file)
{
	struct pipe_inode_info *pipe = file->private_data;

	pipe_lock(pipe);
	pipe->readers++;
	pipe->writers--;
	wake_up_interruptible_sync(&pipe->wait);
	kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
	pipe_unlock(pipe);

	/*
	 * We actually want wait_event_freezable() but then we need
	 * to clear TIF_SIGPENDING and improve dump_interrupted().
	 */
	wait_event_interruptible(pipe->wait, pipe->readers == 1);

	pipe_lock(pipe);
	pipe->readers--;
	pipe->writers++;
	pipe_unlock(pipe);
}
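
/*
 * Posing as a reader (readers++/writers--) keeps the pipe alive while
 * the kernel waits; "pipe->readers == 1" then means the user-space
 * helper has closed its end and this temporary reader is the last one.
 */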

/*
 * umh_pipe_setup
 * helper function to customize the process used
 * to collect the core in userspace. Specifically
 * it sets up a pipe and installs it as fd 0 (stdin)
 * for the process. Returns 0 on success, or
 * PTR_ERR on failure.
 * Note that it also sets the core limit to 1. This
 * is a special value that we use to trap recursive
 * core dumps
 */
static int umh_pipe_setup(struct subprocess_info *info, struct cred *new)
{
	struct file *files[2];
	struct coredump_params *cp = (struct coredump_params *)info->data;
	int err = create_pipe_files(files, 0);
	if (err)
		return err;

	cp->file = files[1];

	err = replace_fd(0, files[0], 0);
	fput(files[0]);
	/* and disallow core files too */
	current->signal->rlim[RLIMIT_CORE] = (struct rlimit){1, 1};

	return err;
}
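
/*
 * For illustration (hypothetical values): with
 * core_pattern = "|/usr/local/bin/core-catcher %p %s", the helper is
 * spawned via call_usermodehelper_exec() with the crashing TGID and
 * signal number as argv, and reads the core image from its stdin,
 * which umh_pipe_setup() wired to the read end of the pipe while the
 * kernel writes the dump into the other end (cp->file).
 */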

void do_coredump(const siginfo_t *siginfo)
{
	struct core_state core_state;
	struct core_name cn;
	struct mm_struct *mm = current->mm;
	struct linux_binfmt * binfmt;
	const struct cred *old_cred;
	struct cred *cred;
	int retval = 0;
	int ispipe;
	struct files_struct *displaced;
	/* require nonrelative corefile path and be extra careful */
	bool need_suid_safe = false;
	bool core_dumped = false;
	static atomic_t core_dump_count = ATOMIC_INIT(0);
	struct coredump_params cprm = {
		.siginfo = siginfo,
		.regs = signal_pt_regs(),
		.limit = rlimit(RLIMIT_CORE),
		/*
		 * We must use the same mm->flags while dumping core to avoid
		 * inconsistency of bit flags, since this flag is not protected
		 * by any locks.
		 */
		.mm_flags = mm->flags,
	};

	audit_core_dumps(siginfo->si_signo);

	binfmt = mm->binfmt;
	if (!binfmt || !binfmt->core_dump)
		goto fail;
	if (!__get_dumpable(cprm.mm_flags))
		goto fail;

	cred = prepare_creds();
	if (!cred)
		goto fail;
	/*
	 * We cannot trust fsuid as being the "true" uid of the process
	 * nor do we know its entire history. We only know it was tainted
	 * so we dump it as root in mode 2, and only into a controlled
	 * environment (pipe handler or fully qualified path).
	 */
	if (__get_dumpable(cprm.mm_flags) == SUID_DUMP_ROOT) {
		/* Setuid core dump mode */
		cred->fsuid = GLOBAL_ROOT_UID;	/* Dump root private */
		need_suid_safe = true;
	}

	retval = coredump_wait(siginfo->si_signo, &core_state);
	if (retval < 0)
		goto fail_creds;

	old_cred = override_creds(cred);

	ispipe = format_corename(&cn, &cprm);
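
	/*
	 * From here on the task runs with the dump credentials installed
	 * by override_creds() above; every exit path below funnels through
	 * the common unwind code, which ends with revert_creds(old_cred).
	 */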

	if (ispipe) {
		int dump_count;
		char **helper_argv;
		struct subprocess_info *sub_info;

		if (ispipe < 0) {
			printk(KERN_WARNING "format_corename failed\n");
			printk(KERN_WARNING "Aborting core\n");
			goto fail_unlock;
		}

		if (cprm.limit == 1) {
			/* See umh_pipe_setup() which sets RLIMIT_CORE = 1.
			 *
			 * Normally core limits are irrelevant to pipes, since
			 * we're not writing to the file system, but we use
			 * cprm.limit of 1 here as a special value, this is a
			 * consistent way to catch recursive crashes.
			 * We can still crash if the core_pattern binary sets
			 * RLIM_CORE = !1, but it runs as root, and can do
			 * lots of stupid things.
			 *
			 * Note that we use task_tgid_vnr here to grab the pid
			 * of the process group leader. That way we get the
			 * right pid if a thread in a multi-threaded
			 * core_pattern process dies.
			 */
			printk(KERN_WARNING
				"Process %d(%s) has RLIMIT_CORE set to 1\n",
				task_tgid_vnr(current), current->comm);
			printk(KERN_WARNING "Aborting core\n");
			goto fail_unlock;
		}
		cprm.limit = RLIM_INFINITY;

		dump_count = atomic_inc_return(&core_dump_count);
		if (core_pipe_limit && (core_pipe_limit < dump_count)) {
			printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
			       task_tgid_vnr(current), current->comm);
			printk(KERN_WARNING "Skipping core dump\n");
			goto fail_dropcount;
		}

		helper_argv = argv_split(GFP_KERNEL, cn.corename, NULL);
		if (!helper_argv) {
			printk(KERN_WARNING "%s failed to allocate memory\n",
			       __func__);
			goto fail_dropcount;
		}

		retval = -ENOMEM;
		sub_info = call_usermodehelper_setup(helper_argv[0],
						helper_argv, NULL, GFP_KERNEL,
						umh_pipe_setup, NULL, &cprm);
		if (sub_info)
			retval = call_usermodehelper_exec(sub_info,
							  UMH_WAIT_EXEC);

		argv_free(helper_argv);
		if (retval) {
			printk(KERN_INFO "Core dump to |%s pipe failed\n",
			       cn.corename);
			goto close_fail;
		}
	} else {
		struct inode *inode;
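
		/*
		 * From here on the pattern named a regular file: the checks
		 * below guard against symlink and hardlink games and against
		 * filesystems that silently change the owner or mode of a
		 * freshly created file.
		 */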

		if (cprm.limit < binfmt->min_coredump)
			goto fail_unlock;

		if (need_suid_safe && cn.corename[0] != '/') {
			printk(KERN_WARNING "Pid %d(%s) can only dump core "\
				"to fully qualified path!\n",
				task_tgid_vnr(current), current->comm);
			printk(KERN_WARNING "Skipping core dump\n");
			goto fail_unlock;
		}

		/*
		 * Unlink the file if it exists unless this is a SUID
		 * binary - in that case, we're running around with root
		 * privs and don't want to unlink another user's coredump.
		 */
		if (!need_suid_safe) {
			mm_segment_t old_fs;

			old_fs = get_fs();
			set_fs(KERNEL_DS);
			/*
			 * If it doesn't exist, that's fine. If there's some
			 * other problem, we'll catch it at the filp_open().
			 */
			(void) sys_unlink((const char __user *)cn.corename);
			set_fs(old_fs);
		}

		/*
		 * There is a race between unlinking and creating the
		 * file, but if that causes an EEXIST here, that's
		 * fine - another process raced with us while creating
		 * the corefile, and the other process won. To userspace,
		 * what matters is that at least one of the two processes
		 * writes its coredump successfully, not which one.
		 */
		cprm.file = filp_open(cn.corename,
				 O_CREAT | 2 | O_NOFOLLOW |
				 O_LARGEFILE | O_EXCL,
				 0600);
		if (IS_ERR(cprm.file))
			goto fail_unlock;

		inode = file_inode(cprm.file);
		if (inode->i_nlink > 1)
			goto close_fail;
		if (d_unhashed(cprm.file->f_path.dentry))
			goto close_fail;
		/*
		 * AK: actually i see no reason to not allow this for named
		 * pipes etc, but keep the previous behaviour for now.
		 */
		if (!S_ISREG(inode->i_mode))
			goto close_fail;
		/*
		 * Don't dump core if the filesystem changed owner or mode
		 * of the file during file creation. This is an issue when
		 * a process dumps core while its cwd is e.g. on a vfat
		 * filesystem.
		 */
		if (!uid_eq(inode->i_uid, current_fsuid()))
			goto close_fail;
		if ((inode->i_mode & 0677) != 0600)
			goto close_fail;
		if (!(cprm.file->f_mode & FMODE_CAN_WRITE))
			goto close_fail;
		if (do_truncate(cprm.file->f_path.dentry, 0, 0, cprm.file))
			goto close_fail;
	}
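
	/*
	 * Whichever branch ran, cprm.file is now either the freshly
	 * created, zero-length regular file or the write end of the
	 * helper's pipe, and is safe to write the dump to.
	 */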

	/* get us an unshared descriptor table; almost always a no-op */
	retval = unshare_files(&displaced);
	if (retval)
		goto close_fail;
	if (displaced)
		put_files_struct(displaced);
	if (!dump_interrupted()) {
		file_start_write(cprm.file);
		core_dumped = binfmt->core_dump(&cprm);
		file_end_write(cprm.file);
	}
	if (ispipe && core_pipe_limit)
		wait_for_dump_helpers(cprm.file);
close_fail:
	if (cprm.file)
		filp_close(cprm.file, NULL);
fail_dropcount:
	if (ispipe)
		atomic_dec(&core_dump_count);
fail_unlock:
	kfree(cn.corename);
	coredump_finish(mm, core_dumped);
	revert_creds(old_cred);
fail_creds:
	put_cred(cred);
fail:
	return;
}
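
/*
 * The fail_* labels above unwind strictly in reverse order of
 * acquisition: close_fail releases the file, fail_dropcount the pipe
 * accounting, fail_unlock the name buffer, core waiters and the creds
 * override, and fail_creds the allocated credentials themselves.
 */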

/*
 * Core dumping helper functions. These are the only things you should
 * do on a core-file: use only these functions to write out all the
 * necessary info.
 */
int dump_emit(struct coredump_params *cprm, const void *addr, int nr)
{
	struct file *file = cprm->file;
	loff_t pos = file->f_pos;
	ssize_t n;

	if (cprm->written + nr > cprm->limit)
		return 0;
	while (nr) {
		if (dump_interrupted())
			return 0;
		n = __kernel_write(file, addr, nr, &pos);
		if (n <= 0)
			return 0;
		file->f_pos = pos;
		cprm->written += n;
		nr -= n;
	}
	return 1;
}
EXPORT_SYMBOL(dump_emit);
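
/*
 * Typical use, sketched (hypothetical caller, not from this file): a
 * binfmt core_dump() implementation emits fixed-size headers with
 * dump_emit() and pads to the next boundary with dump_align():
 *
 *	if (!dump_emit(cprm, &elf_hdr, sizeof(elf_hdr)))
 *		return 0;
 *	if (!dump_align(cprm, 4))
 *		return 0;
 *
 * All three helpers return 1 on success and 0 on truncation or
 * interruption, so callers can simply propagate the failure.
 */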

int dump_skip(struct coredump_params *cprm, size_t nr)
{
	static char zeroes[PAGE_SIZE];
	struct file *file = cprm->file;
	if (file->f_op->llseek && file->f_op->llseek != no_llseek) {
		if (cprm->written + nr > cprm->limit)
			return 0;
		if (dump_interrupted() ||
		    file->f_op->llseek(file, nr, SEEK_CUR) < 0)
			return 0;
		cprm->written += nr;
		return 1;
	} else {
		while (nr > PAGE_SIZE) {
			if (!dump_emit(cprm, zeroes, PAGE_SIZE))
				return 0;
			nr -= PAGE_SIZE;
		}
		return dump_emit(cprm, zeroes, nr);
	}
}
EXPORT_SYMBOL(dump_skip);
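
/*
 * Seeking instead of writing zeroes lets seekable filesystems keep the
 * skipped range as a hole, so large unmapped regions cost no disk
 * space; pipes cannot seek, hence the explicit zero-fill fallback.
 */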

int dump_align(struct coredump_params *cprm, int align)
{
	unsigned mod = cprm->written & (align - 1);
	if (align & (align - 1))
		return 0;
	return mod ? dump_skip(cprm, align - mod) : 1;
}
EXPORT_SYMBOL(dump_align);
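
/*
 * align must be a power of two: "align & (align - 1)" is the usual
 * power-of-two test (e.g. 8 & 7 == 0, but 6 & 5 == 4), and the
 * "written & (align - 1)" remainder then works without a division.
 */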