2 * a.out loader for x86-64
4 * Copyright (C) 1991, 1992, 1996 Linus Torvalds
5 * Hacked together by Andi Kleen
8 #include <linux/module.h>
10 #include <linux/time.h>
11 #include <linux/kernel.h>
13 #include <linux/mman.h>
14 #include <linux/a.out.h>
15 #include <linux/errno.h>
16 #include <linux/signal.h>
17 #include <linux/string.h>
19 #include <linux/file.h>
20 #include <linux/stat.h>
21 #include <linux/fcntl.h>
22 #include <linux/ptrace.h>
23 #include <linux/user.h>
24 #include <linux/binfmts.h>
25 #include <linux/personality.h>
26 #include <linux/init.h>
27 #include <linux/jiffies.h>
29 #include <asm/uaccess.h>
30 #include <asm/pgalloc.h>
31 #include <asm/cacheflush.h>
32 #include <asm/user32.h>
36 #undef CORE_DUMP /* definitely broken */
38 static int load_aout_binary(struct linux_binprm *, struct pt_regs *regs);
39 static int load_aout_library(struct file *);
42 static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file,
/*
 * Fill in the 32-bit user structure for a core dump.
 *
 * Copies the memory-layout sizes, the x86 hardware debug registers and
 * the user-visible register set of the current task into *dump, so it
 * can be written out as the header of an a.out core file.
 */
static void dump_thread32(struct pt_regs *regs, struct user32 *dump)
/* changed the size calculations - should hopefully work better. lbt */
	/* Stack start, rounded down to a page boundary. */
	dump->start_stack = regs->sp & ~(PAGE_SIZE - 1);
	/* Text size in pages (a.out text is assumed to start at address 0). */
	dump->u_tsize = ((unsigned long) current->mm->end_code) >> PAGE_SHIFT;
	/* Data size in pages: everything up to brk, minus the text pages. */
	dump->u_dsize = ((unsigned long)
			 (current->mm->brk + (PAGE_SIZE-1))) >> PAGE_SHIFT;
	dump->u_dsize -= dump->u_tsize;
	/* Hardware debug registers; slots 4 and 5 are unused on x86. */
	dump->u_debugreg[0] = current->thread.debugreg0;
	dump->u_debugreg[1] = current->thread.debugreg1;
	dump->u_debugreg[2] = current->thread.debugreg2;
	dump->u_debugreg[3] = current->thread.debugreg3;
	dump->u_debugreg[4] = 0;
	dump->u_debugreg[5] = 0;
	dump->u_debugreg[6] = current->thread.debugreg6;
	dump->u_debugreg[7] = current->thread.debugreg7;
	/* Stack size in pages, measured up to the 3GB compat user limit. */
	if (dump->start_stack < 0xc0000000) {
		tmp = (unsigned long) (0xc0000000 - dump->start_stack);
		dump->u_ssize = tmp >> PAGE_SHIFT;
	/* User-visible register snapshot (32-bit view of pt_regs). */
	dump->regs.bx = regs->bx;
	dump->regs.cx = regs->cx;
	dump->regs.dx = regs->dx;
	dump->regs.si = regs->si;
	dump->regs.di = regs->di;
	dump->regs.bp = regs->bp;
	dump->regs.ax = regs->ax;
	/* ds/es live in the thread struct, not in pt_regs, on x86-64. */
	dump->regs.ds = current->thread.ds;
	dump->regs.es = current->thread.es;
	dump->regs.orig_ax = regs->orig_ax;
	dump->regs.ip = regs->ip;
	dump->regs.cs = regs->cs;
	dump->regs.flags = regs->flags;
	dump->regs.sp = regs->sp;
	dump->regs.ss = regs->ss;
	/* FPU state; u_fpvalid is nonzero when the i387 contents are valid. */
	dump->u_fpvalid = dump_fpu(regs, &dump->i387);
/* Registration record hooking the a.out loaders into the exec machinery. */
static struct linux_binfmt aout_format = {
	.module = THIS_MODULE,
	.load_binary = load_aout_binary,
	.load_shlib = load_aout_library,
	.core_dump = aout_core_dump,
	.min_coredump = PAGE_SIZE	/* smallest core file we will write */
/*
 * Grow the brk area to cover [start, end): page-align both ends and let
 * vm_brk() map the anonymous, zero-filled pages for bss/brk.
 *
 * NOTE(review): vm_brk()'s return value is ignored here, so a failure to
 * map the bss goes undetected by the caller — confirm whether that is
 * acceptable for this loader.
 */
static void set_brk(unsigned long start, unsigned long end)
	start = PAGE_ALIGN(start);
	end = PAGE_ALIGN(end);
	vm_brk(start, end - start);
127 * These are the only things you should do on a core-file: use only these
128 * macros to write out all the necessary info.
131 #include <linux/coredump.h>
/*
 * Core-dump helper macros.  Both bail out to the end_coredump label in
 * aout_core_dump() on failure, so they may only be used inside that
 * function.  (Deliberately an unbraced "if ... goto" — wrapping in
 * do { } while (0) is unnecessary since the goto leaves the macro.)
 */
/* Write nr bytes from addr to the core file; abort the dump on failure. */
#define DUMP_WRITE(addr, nr)			     \
	if (!dump_write(file, (void *)(addr), (nr))) \
		goto end_coredump;

/* Seek (or zero-pad) to offset in the core file; abort the dump on failure. */
#define DUMP_SEEK(offset)		\
	if (!dump_seek(file, offset))	\
		goto end_coredump;

/*
 * File offset where the data segment starts in the dump (right after
 * the text pages), and the address where the dumped stack begins.
 * START_DATA now takes the user struct explicitly — it was previously
 * declared with an empty parameter list yet invoked as START_DATA(dump),
 * which cannot preprocess.
 */
#define START_DATA(u)	((u).u_tsize << PAGE_SHIFT)
#define START_STACK(u)	((u).start_stack)
/*
 * Routine writes a core dump image in the current directory.
 * Currently only a stub-function.
 *
 * Note that setuid/setgid files won't make a core-dump if the uid/gid
 * changed due to the set[u|g]id. It's enforced by the "current->mm->dumpable"
 * field, which also makes sure the core-dumps won't be recursive if the
 * dumping of the process results in another error..
 */
static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file,
	unsigned long dump_start, dump_size;
	/* Mark the task as dumping core. */
	current->flags |= PF_DUMPCORE;
	/* NOTE(review): bound is sizeof(current->comm), not sizeof(dump.u_comm);
	 * assumes both have the same size, and strncpy does not guarantee NUL
	 * termination when comm fills the buffer — confirm. */
	strncpy(dump.u_comm, current->comm, sizeof(current->comm));
	/* Offset of the register image inside the dumped user struct. */
	dump.u_ar0 = offsetof(struct user32, regs);
	dump_thread32(regs, &dump);
	/*
	 * If the size of the dump file exceeds the rlimit, then see
	 * what would happen if we wrote the stack, but not the data
	 */
	if ((dump.u_dsize + dump.u_ssize + 1) * PAGE_SIZE > limit)
	/* Make sure we have enough room to write the stack and data areas. */
	if ((dump.u_ssize + 1) * PAGE_SIZE > limit)
	/* make sure we actually have a data and stack area to dump */
	if (!access_ok(VERIFY_READ, (void *) (unsigned long)START_DATA(dump),
		       dump.u_dsize << PAGE_SHIFT))
	if (!access_ok(VERIFY_READ, (void *) (unsigned long)START_STACK(dump),
		       dump.u_ssize << PAGE_SHIFT))
	/* Write the user-struct header first. */
	DUMP_WRITE(&dump, sizeof(dump));
	/* Now dump all of the user data. Include malloced stuff as well */
	DUMP_SEEK(PAGE_SIZE);
	/* now we start writing out the user space info */
	/* Dump the data area */
	if (dump.u_dsize != 0) {
		dump_start = START_DATA(dump);
		dump_size = dump.u_dsize << PAGE_SHIFT;
		DUMP_WRITE(dump_start, dump_size);
	/* Now prepare to dump the stack area */
	if (dump.u_ssize != 0) {
		dump_start = START_STACK(dump);
		dump_size = dump.u_ssize << PAGE_SHIFT;
		DUMP_WRITE(dump_start, dump_size);
/*
 * create_aout_tables() parses the env- and arg-strings in new user
 * memory and creates the pointer tables from them, and puts their
 * addresses on the "stack", returning the new stack pointer value.
 */
static u32 __user *create_aout_tables(char __user *p, struct linux_binprm *bprm)
	u32 __user *argv, *envp, *sp;
	int argc = bprm->argc, envc = bprm->envc;
	/* Round the string pointer down to a u32 boundary for the tables. */
	sp = (u32 __user *) ((-(unsigned long)sizeof(u32)) & (unsigned long) p);
	/* Push envp, argv and argc (in that order) onto the new stack. */
	put_user((unsigned long) envp, --sp);
	put_user((unsigned long) argv, --sp);
	put_user(argc, --sp);
	current->mm->arg_start = (unsigned long) p;
	/* Fill the argv table with pointers into the argument strings. */
	put_user((u32)(unsigned long)p, argv++);
	current->mm->arg_end = current->mm->env_start = (unsigned long) p;
	/* Fill the envp table with pointers into the environment strings. */
	put_user((u32)(unsigned long)p, envp++);
	current->mm->env_end = (unsigned long) p;
/*
 * These are the functions used to load a.out style executables and shared
 * libraries. There is no binary dependent code anywhere else.
 */
static int load_aout_binary(struct linux_binprm *bprm, struct pt_regs *regs)
	unsigned long error, fd_offset, rlim;
	ex = *((struct exec *) bprm->buf); /* exec-header */
	/* Reject anything that is not a recognised a.out executable, that
	 * carries relocations, or whose file is smaller than the declared
	 * text + data + symbol sizes. */
	if ((N_MAGIC(ex) != ZMAGIC && N_MAGIC(ex) != OMAGIC &&
	     N_MAGIC(ex) != QMAGIC && N_MAGIC(ex) != NMAGIC) ||
	    N_TRSIZE(ex) || N_DRSIZE(ex) ||
	    i_size_read(bprm->file->f_path.dentry->d_inode) <
	    ex.a_text+ex.a_data+N_SYMSIZE(ex)+N_TXTOFF(ex)) {
	/* File offset of the text segment, depends on the magic number. */
	fd_offset = N_TXTOFF(ex);
	/* Check initial limits. This avoids letting people circumvent
	 * size limits imposed on them by creating programs with large
	 * arrays in the data or bss.
	 */
	rlim = rlimit(RLIMIT_DATA);
	if (rlim >= RLIM_INFINITY)
	if (ex.a_data + ex.a_bss > rlim)
	/* Flush all traces of the currently running executable */
	retval = flush_old_exec(bprm);
	/* OK, This is the point of no return */
	set_personality(PER_LINUX);
	set_personality_ia32(false);
	setup_new_exec(bprm);
	/* Run in 32-bit mode; clear the 64-bit-only registers. */
	regs->cs = __USER32_CS;
	regs->r8 = regs->r9 = regs->r10 = regs->r11 = regs->r12 =
		regs->r13 = regs->r14 = regs->r15 = 0;
	/* Lay out text, data and brk according to the a.out header. */
	current->mm->end_code = ex.a_text +
		(current->mm->start_code = N_TXTADDR(ex));
	current->mm->end_data = ex.a_data +
		(current->mm->start_data = N_DATADDR(ex));
	current->mm->brk = ex.a_bss +
		(current->mm->start_brk = N_BSSADDR(ex));
	current->mm->free_area_cache = TASK_UNMAPPED_BASE;
	current->mm->cached_hole_size = 0;
	/* Build the user stack below the 32-bit stack top. */
	retval = setup_arg_pages(bprm, IA32_STACK_TOP, EXSTACK_DEFAULT);
	/* Someone check-me: is this error path enough? */
	send_sig(SIGKILL, current, 0);
	install_exec_creds(bprm);
	if (N_MAGIC(ex) == OMAGIC) {
		unsigned long text_addr, map_size;
		text_addr = N_TXTADDR(ex);
		map_size = ex.a_text+ex.a_data;
		/* OMAGIC: text and data are contiguous and writable; map
		 * anonymous pages and read the image in from the file. */
		error = vm_brk(text_addr & PAGE_MASK, map_size);
		if (error != (text_addr & PAGE_MASK)) {
			send_sig(SIGKILL, current, 0);
		error = bprm->file->f_op->read(bprm->file,
				(char __user *)text_addr,
				ex.a_text+ex.a_data, &pos);
		if ((signed long)error < 0) {
			send_sig(SIGKILL, current, 0);
		flush_icache_range(text_addr, text_addr+ex.a_text+ex.a_data);
		/* Rate-limited complaints about badly aligned binaries:
		 * at most one message every 5 seconds. */
		static unsigned long error_time, error_time2;
		if ((ex.a_text & 0xfff || ex.a_data & 0xfff) &&
		    (N_MAGIC(ex) != NMAGIC) &&
		    time_after(jiffies, error_time2 + 5*HZ)) {
			printk(KERN_NOTICE "executable not page aligned\n");
			error_time2 = jiffies;
		if ((fd_offset & ~PAGE_MASK) != 0 &&
		    time_after(jiffies, error_time + 5*HZ)) {
			"fd_offset is not page aligned. Please convert "
			bprm->file->f_path.dentry->d_name.name);
			error_time = jiffies;
		/* Fallback: if the fs cannot mmap or the text offset is not
		 * page aligned, read text+data into anonymous memory. */
		if (!bprm->file->f_op->mmap || (fd_offset & ~PAGE_MASK) != 0) {
			loff_t pos = fd_offset;
			vm_brk(N_TXTADDR(ex), ex.a_text+ex.a_data);
			bprm->file->f_op->read(bprm->file,
					(char __user *)N_TXTADDR(ex),
					ex.a_text+ex.a_data, &pos);
			flush_icache_range((unsigned long) N_TXTADDR(ex),
					   (unsigned long) N_TXTADDR(ex) +
					   ex.a_text+ex.a_data);
		/* Normal case: map the text segment read-only/executable... */
		error = vm_mmap(bprm->file, N_TXTADDR(ex), ex.a_text,
				PROT_READ | PROT_EXEC,
				MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE |
				MAP_EXECUTABLE | MAP_32BIT,
		if (error != N_TXTADDR(ex)) {
			send_sig(SIGKILL, current, 0);
		/* ...and the data segment writable, right after the text. */
		error = vm_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
				PROT_READ | PROT_WRITE | PROT_EXEC,
				MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE |
				MAP_EXECUTABLE | MAP_32BIT,
				fd_offset + ex.a_text);
		if (error != N_DATADDR(ex)) {
			send_sig(SIGKILL, current, 0);
	set_binfmt(&aout_format);
	/* Map the zero-filled bss/brk area... */
	set_brk(current->mm->start_brk, current->mm->brk);
	/* ...and build the argc/argv/envp tables on the new user stack. */
	current->mm->start_stack =
		(unsigned long)create_aout_tables((char __user *)bprm->p, bprm);
	/* 32-bit data segments for ds/es. */
	loadsegment(ds, __USER32_DS);
	loadsegment(es, __USER32_DS);
	/* Start the new program at its entry point. */
	(regs)->ip = ex.a_entry;
	(regs)->sp = current->mm->start_stack;
	(regs)->flags = 0x200;	/* EFLAGS.IF set: interrupts enabled */
	(regs)->cs = __USER32_CS;
	(regs)->ss = __USER32_DS;
	regs->r8 = regs->r9 = regs->r10 = regs->r11 =
		regs->r12 = regs->r13 = regs->r14 = regs->r15 = 0;
/*
 * Load an a.out style shared library: validate the header, then map (or
 * read) text+data at the address encoded in a_entry and brk-map the bss.
 */
static int load_aout_library(struct file *file)
	unsigned long bss, start_addr, len, error;
	inode = file->f_path.dentry->d_inode;
	/* Read the a.out header from the start of the library file. */
	error = kernel_read(file, 0, (char *) &ex, sizeof(ex));
	if (error != sizeof(ex))
	/* We come in here for the regular a.out style of shared libraries */
	if ((N_MAGIC(ex) != ZMAGIC && N_MAGIC(ex) != QMAGIC) || N_TRSIZE(ex) ||
	    N_DRSIZE(ex) || ((ex.a_entry & 0xfff) && N_MAGIC(ex) == ZMAGIC) ||
	    ex.a_text+ex.a_data+N_SYMSIZE(ex)+N_TXTOFF(ex)) {
	/* For QMAGIC, the starting address is 0x20 into the page. We mask
	   this off to get the starting address for the page */
	start_addr = ex.a_entry & 0xfffff000;
	/* Unaligned text offset: can't mmap the file, so fall back to
	 * reading text+data into anonymous memory. */
	if ((N_TXTOFF(ex) & ~PAGE_MASK) != 0) {
		loff_t pos = N_TXTOFF(ex);
		/* Complain at most once every 5 seconds. */
		static unsigned long error_time;
		if (time_after(jiffies, error_time + 5*HZ)) {
			"N_TXTOFF is not page aligned. Please convert "
			file->f_path.dentry->d_name.name);
			error_time = jiffies;
		vm_brk(start_addr, ex.a_text + ex.a_data + ex.a_bss);
		file->f_op->read(file, (char __user *)start_addr,
				 ex.a_text + ex.a_data, &pos);
		flush_icache_range((unsigned long) start_addr,
				   (unsigned long) start_addr + ex.a_text +
	/* Now use mmap to map the library into memory. */
	error = vm_mmap(file, start_addr, ex.a_text + ex.a_data,
			PROT_READ | PROT_WRITE | PROT_EXEC,
			MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_32BIT,
	if (error != start_addr)
	/* Map the zero-filled bss pages after the page-aligned text+data. */
	len = PAGE_ALIGN(ex.a_text + ex.a_data);
	bss = ex.a_text + ex.a_data + ex.a_bss;
	error = vm_brk(start_addr + len, bss - len);
	if (error != start_addr + len)
/* Module init: register the a.out format handler with the exec machinery. */
static int __init init_aout_binfmt(void)
	register_binfmt(&aout_format);
/* Module exit: unregister the a.out format handler again. */
static void __exit exit_aout_binfmt(void)
	unregister_binfmt(&aout_format);
/* Standard module plumbing. */
module_init(init_aout_binfmt);
module_exit(exit_aout_binfmt);
MODULE_LICENSE("GPL");