/*
 * linux/arch/m68k/kernel/sys_m68k.c
 *
 * This file contains various random system calls that
 * have a non-standard calling sequence on the Linux/m68k
 * platform.
 */

#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/sem.h>
#include <linux/msg.h>
#include <linux/shm.h>
#include <linux/stat.h>
#include <linux/syscalls.h>
#include <linux/mman.h>
#include <linux/file.h>
#include <linux/ipc.h>

#include <asm/setup.h>
#include <asm/uaccess.h>
#include <asm/cachectl.h>
#include <asm/traps.h>
#include <asm/ipc.h>
#include <asm/page.h>
#include <asm/unistd.h>

/* common code for old and new mmaps */
static inline long do_mmap2(
	unsigned long addr, unsigned long len,
	unsigned long prot, unsigned long flags,
	unsigned long fd, unsigned long pgoff)
{
	int error = -EBADF;
	struct file *file = NULL;

	flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
	if (!(flags & MAP_ANONYMOUS)) {
		file = fget(fd);
		if (!file)
			goto out;
	}

	down_write(&current->mm->mmap_sem);
	error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
	up_write(&current->mm->mmap_sem);

	if (file)
		fput(file);
out:
	return error;
}

asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
	unsigned long prot, unsigned long flags,
	unsigned long fd, unsigned long pgoff)
{
	return do_mmap2(addr, len, prot, flags, fd, pgoff);
}

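/*
 * Illustrative userspace-side sketch (not part of this file; the
 * wrapper name and the 4k page assumption are hypothetical): sys_mmap2
 * takes the file offset in pages, not bytes, so a caller shifts by the
 * page size before trapping.
 */
#if 0
#include <unistd.h>
#include <sys/syscall.h>

static void *my_mmap2(void *addr, size_t len, int prot, int flags,
		      int fd, unsigned long byte_offset)
{
	/* mmap2 wants the offset in PAGE_SIZE units. */
	return (void *)syscall(SYS_mmap2, addr, len, prot, flags, fd,
			       byte_offset >> 12 /* assumes 4k pages */);
}
#endif
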
/*
 * Perform the select(nd, in, out, ex, tv) and mmap() system
 * calls. Linux/m68k cloned Linux/i386, which originally could not
 * handle more than 4 system call parameters, so these system calls
 * used a memory block for parameter passing.
 */

struct mmap_arg_struct {
	unsigned long addr;
	unsigned long len;
	unsigned long prot;
	unsigned long flags;
	unsigned long fd;
	unsigned long offset;
};

asmlinkage int old_mmap(struct mmap_arg_struct __user *arg)
{
	struct mmap_arg_struct a;
	int error = -EFAULT;

	if (copy_from_user(&a, arg, sizeof(a)))
		goto out;

	error = -EINVAL;
	if (a.offset & ~PAGE_MASK)
		goto out;

	a.flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);

	error = do_mmap2(a.addr, a.len, a.prot, a.flags, a.fd, a.offset >> PAGE_SHIFT);
out:
	return error;
}

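/*
 * Illustrative userspace-side sketch of the memory-block convention
 * described above (hypothetical, not compiled here): all six mmap
 * parameters are packed into one struct and a single pointer is
 * passed to the old_mmap entry; old_select below works the same way.
 */
#if 0
struct mmap_arg_struct args = {
	.addr = 0, .len = 8192,
	.prot = PROT_READ | PROT_WRITE,
	.flags = MAP_PRIVATE | MAP_ANONYMOUS,
	.fd = -1, .offset = 0,	/* must be page aligned */
};
void *p = (void *)syscall(SYS_mmap, &args);
#endif
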
struct mmap_arg_struct64 {
	__u32	addr;
	__u32	len;
	__u32	prot;
	__u32	flags;
	__u64	offset;	/* 64 bits */
	__u32	fd;
};

asmlinkage long sys_mmap64(struct mmap_arg_struct64 *arg)
{
	int error = -EBADF;
	struct file *file = NULL;
	struct mmap_arg_struct64 a;
	unsigned long pgoff;

	if (copy_from_user(&a, arg, sizeof(a)))
		return -EFAULT;

	if ((long)a.offset & ~PAGE_MASK)
		return -EINVAL;

	/* Reject offsets whose page number does not fit in unsigned long. */
	pgoff = a.offset >> PAGE_SHIFT;
	if ((a.offset >> PAGE_SHIFT) != pgoff)
		return -EINVAL;

	if (!(a.flags & MAP_ANONYMOUS)) {
		file = fget(a.fd);
		if (!file)
			goto out;
	}
	a.flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);

	down_write(&current->mm->mmap_sem);
	error = do_mmap_pgoff(file, a.addr, a.len, a.prot, a.flags, pgoff);
	up_write(&current->mm->mmap_sem);
	if (file)
		fput(file);
out:
	return error;
}

struct sel_arg_struct {
	unsigned long n;
	fd_set __user *inp, *outp, *exp;
	struct timeval __user *tvp;
};

asmlinkage int old_select(struct sel_arg_struct __user *arg)
{
	struct sel_arg_struct a;

	if (copy_from_user(&a, arg, sizeof(a)))
		return -EFAULT;
	/* sys_select() does the appropriate kernel locking */
	return sys_select(a.n, a.inp, a.outp, a.exp, a.tvp);
}

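/*
 * Userspace-side sketch for old_select (hypothetical): the same
 * single-pointer convention, with all five select() parameters read
 * from one sel_arg_struct.
 */
#if 0
struct sel_arg_struct s = {
	.n = nfds, .inp = &rfds, .outp = NULL, .exp = NULL, .tvp = &tv,
};
int n = syscall(SYS_select, &s);
#endif
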
/*
 * sys_ipc() is the de-multiplexer for the SysV IPC calls.
 *
 * This is really horribly ugly.
 */
asmlinkage int sys_ipc (uint call, int first, int second,
			int third, void __user *ptr, long fifth)
{
	int version, ret;

	version = call >> 16; /* hack for backward compatibility */
	call &= 0xffff;

	if (call <= SEMCTL)
		switch (call) {
		case SEMOP:
			return sys_semop (first, ptr, second);
		case SEMGET:
			return sys_semget (first, second, third);
		case SEMCTL: {
			union semun fourth;
			if (!ptr)
				return -EINVAL;
			if (get_user(fourth.__pad, (void __user *__user *) ptr))
				return -EFAULT;
			return sys_semctl (first, second, third, fourth);
			}
		default:
			return -ENOSYS;
		}
	if (call <= MSGCTL)
		switch (call) {
		case MSGSND:
			return sys_msgsnd (first, ptr, second, third);
		case MSGRCV:
			switch (version) {
			case 0: {
				struct ipc_kludge tmp;
				if (!ptr)
					return -EINVAL;
				if (copy_from_user (&tmp, ptr, sizeof (tmp)))
					return -EFAULT;
				return sys_msgrcv (first, tmp.msgp, second,
						   tmp.msgtyp, third);
				}
			default:
				return sys_msgrcv (first, ptr,
						   second, fifth, third);
			}
		case MSGGET:
			return sys_msgget ((key_t) first, second);
		case MSGCTL:
			return sys_msgctl (first, second, ptr);
		default:
			return -ENOSYS;
		}
	if (call <= SHMCTL)
		switch (call) {
		case SHMAT: {
			ulong raddr;
			ret = do_shmat (first, ptr, second, &raddr);
			if (ret)
				return ret;
			return put_user (raddr, (ulong __user *) third);
			}
		case SHMDT:
			return sys_shmdt (ptr);
		case SHMGET:
			return sys_shmget (first, second, third);
		case SHMCTL:
			return sys_shmctl (first, second, ptr);
		default:
			return -ENOSYS;
		}

	return -EINVAL;
}

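/*
 * Illustrative sketch (hypothetical wrapper): libc funnels, e.g.,
 * semop() through this single ipc trap.  The IPC "version" rides in
 * the top 16 bits of 'call', which is why sys_ipc() starts with
 * 'version = call >> 16'.
 */
#if 0
static int my_semop(int semid, struct sembuf *sops, unsigned nsops)
{
	/* SEMOP with version 0 in the high 16 bits of the call word. */
	return syscall(SYS_ipc, SEMOP, semid, nsops, 0, sops, 0);
}
#endif
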
/* Convert virtual (user) address VADDR to physical address PADDR */
#define virt_to_phys_040(vaddr)					\
({								\
	unsigned long _mmusr, _paddr;				\
								\
	__asm__ __volatile__ (".chip 68040\n\t"			\
			      "ptestr (%1)\n\t"			\
			      "movec %%mmusr,%0\n\t"		\
			      ".chip 68k"			\
			      : "=r" (_mmusr)			\
			      : "a" (vaddr));			\
	_paddr = (_mmusr & MMU_R_040) ? (_mmusr & PAGE_MASK) : 0; \
	_paddr;							\
})

static inline int
cache_flush_040 (unsigned long addr, int scope, int cache, unsigned long len)
{
	unsigned long paddr, i;

	switch (scope) {
	case FLUSH_SCOPE_ALL:
		switch (cache) {
		case FLUSH_CACHE_DATA:
			/* This nop is needed for some broken versions of the 68040. */
			__asm__ __volatile__ ("nop\n\t"
					      ".chip 68040\n\t"
					      "cpusha %dc\n\t"
					      ".chip 68k");
			break;
		case FLUSH_CACHE_INSN:
			__asm__ __volatile__ ("nop\n\t"
					      ".chip 68040\n\t"
					      "cpusha %ic\n\t"
					      ".chip 68k");
			break;
		default:
		case FLUSH_CACHE_BOTH:
			__asm__ __volatile__ ("nop\n\t"
					      ".chip 68040\n\t"
					      "cpusha %bc\n\t"
					      ".chip 68k");
			break;
		}
		break;

	case FLUSH_SCOPE_LINE:
		/* Find the physical address of the first mapped page in the
		   address range.  */
		if ((paddr = virt_to_phys_040(addr))) {
			paddr += addr & ~(PAGE_MASK | 15);
			len = (len + (addr & 15) + 15) >> 4;
		} else {
			unsigned long tmp = PAGE_SIZE - (addr & ~PAGE_MASK);

			if (len <= tmp)
				return 0;
			addr += tmp;
			len -= tmp;
			while (!(paddr = virt_to_phys_040(addr))) {
				if (len <= PAGE_SIZE)
					return 0;
				len -= PAGE_SIZE;
				addr += PAGE_SIZE;
			}
			len = (len + 15) >> 4;
		}
		i = (PAGE_SIZE - (paddr & ~PAGE_MASK)) >> 4;
		while (len--) {
			switch (cache) {
			case FLUSH_CACHE_DATA:
				__asm__ __volatile__ ("nop\n\t"
						      ".chip 68040\n\t"
						      "cpushl %%dc,(%0)\n\t"
						      ".chip 68k"
						      : : "a" (paddr));
				break;
			case FLUSH_CACHE_INSN:
				__asm__ __volatile__ ("nop\n\t"
						      ".chip 68040\n\t"
						      "cpushl %%ic,(%0)\n\t"
						      ".chip 68k"
						      : : "a" (paddr));
				break;
			default:
			case FLUSH_CACHE_BOTH:
				__asm__ __volatile__ ("nop\n\t"
						      ".chip 68040\n\t"
						      "cpushl %%bc,(%0)\n\t"
						      ".chip 68k"
						      : : "a" (paddr));
				break;
			}
			if (!--i && len) {
				/*
				 * No need to page align here since it is done by
				 * virt_to_phys_040().
				 */
				addr += PAGE_SIZE;
				i = PAGE_SIZE / 16;
				/* Recompute physical address when crossing a page
				   boundary.  */
				while (!(paddr = virt_to_phys_040(addr))) {
					if (len <= i)
						return 0;
					len -= i;
					addr += PAGE_SIZE;
				}
			} else
				paddr += 16;
		}
		break;

	case FLUSH_SCOPE_PAGE:
		len += (addr & ~PAGE_MASK) + (PAGE_SIZE - 1);
		for (len >>= PAGE_SHIFT; len--; addr += PAGE_SIZE) {
			if (!(paddr = virt_to_phys_040(addr)))
				continue;
			switch (cache) {
			case FLUSH_CACHE_DATA:
				__asm__ __volatile__ ("nop\n\t"
						      ".chip 68040\n\t"
						      "cpushp %%dc,(%0)\n\t"
						      ".chip 68k"
						      : : "a" (paddr));
				break;
			case FLUSH_CACHE_INSN:
				__asm__ __volatile__ ("nop\n\t"
						      ".chip 68040\n\t"
						      "cpushp %%ic,(%0)\n\t"
						      ".chip 68k"
						      : : "a" (paddr));
				break;
			default:
			case FLUSH_CACHE_BOTH:
				__asm__ __volatile__ ("nop\n\t"
						      ".chip 68040\n\t"
						      "cpushp %%bc,(%0)\n\t"
						      ".chip 68k"
						      : : "a" (paddr));
				break;
			}
		}
		break;
	}
	return 0;
}

#define virt_to_phys_060(vaddr)				\
({							\
	unsigned long paddr;				\
	__asm__ __volatile__ (".chip 68060\n\t"		\
			      "plpar (%0)\n\t"		\
			      ".chip 68k"		\
			      : "=a" (paddr)		\
			      : "0" (vaddr));		\
	(paddr); /* XXX */				\
})

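/*
 * Note: unlike the ptestr path above, PLPAR reports a failed
 * translation by taking an access error exception rather than
 * returning a status bit, which is presumably what the XXX refers to;
 * the callers below still only test the result against 0.
 */
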
static inline int
cache_flush_060 (unsigned long addr, int scope, int cache, unsigned long len)
{
	unsigned long paddr, i;

	/*
	 * 68060 manual says:
	 *  cpush %dc : flush DC, remains valid (with our %cacr setup)
	 *  cpush %ic : invalidate IC
	 *  cpush %bc : flush DC + invalidate IC
	 */
	switch (scope) {
	case FLUSH_SCOPE_ALL:
		switch (cache) {
		case FLUSH_CACHE_DATA:
			__asm__ __volatile__ (".chip 68060\n\t"
					      "cpusha %dc\n\t"
					      ".chip 68k");
			break;
		case FLUSH_CACHE_INSN:
			__asm__ __volatile__ (".chip 68060\n\t"
					      "cpusha %ic\n\t"
					      ".chip 68k");
			break;
		default:
		case FLUSH_CACHE_BOTH:
			__asm__ __volatile__ (".chip 68060\n\t"
					      "cpusha %bc\n\t"
					      ".chip 68k");
			break;
		}
		break;

	case FLUSH_SCOPE_LINE:
		/* Find the physical address of the first mapped page in the
		   address range.  */
		len += addr & 15;
		addr &= -16;
		if (!(paddr = virt_to_phys_060(addr))) {
			unsigned long tmp = PAGE_SIZE - (addr & ~PAGE_MASK);

			if (len <= tmp)
				return 0;
			addr += tmp;
			len -= tmp;
			while (!(paddr = virt_to_phys_060(addr))) {
				if (len <= PAGE_SIZE)
					return 0;
				len -= PAGE_SIZE;
				addr += PAGE_SIZE;
			}
		}
		len = (len + 15) >> 4;
		i = (PAGE_SIZE - (paddr & ~PAGE_MASK)) >> 4;
		while (len--) {
			switch (cache) {
			case FLUSH_CACHE_DATA:
				__asm__ __volatile__ (".chip 68060\n\t"
						      "cpushl %%dc,(%0)\n\t"
						      ".chip 68k"
						      : : "a" (paddr));
				break;
			case FLUSH_CACHE_INSN:
				__asm__ __volatile__ (".chip 68060\n\t"
						      "cpushl %%ic,(%0)\n\t"
						      ".chip 68k"
						      : : "a" (paddr));
				break;
			default:
			case FLUSH_CACHE_BOTH:
				__asm__ __volatile__ (".chip 68060\n\t"
						      "cpushl %%bc,(%0)\n\t"
						      ".chip 68k"
						      : : "a" (paddr));
				break;
			}
			if (!--i && len) {
				/*
				 * We just want to jump to the first cache line
				 * in the next page.
				 */
				addr += PAGE_SIZE;
				addr &= PAGE_MASK;

				i = PAGE_SIZE / 16;
				/* Recompute physical address when crossing a page
				   boundary.  */
				while (!(paddr = virt_to_phys_060(addr))) {
					if (len <= i)
						return 0;
					len -= i;
					addr += PAGE_SIZE;
				}
			} else
				paddr += 16;
		}
		break;

	case FLUSH_SCOPE_PAGE:
		len += (addr & ~PAGE_MASK) + (PAGE_SIZE - 1);
		addr &= PAGE_MASK;	/* Workaround for bug in some
					   revisions of the 68060 */
		for (len >>= PAGE_SHIFT; len--; addr += PAGE_SIZE) {
			if (!(paddr = virt_to_phys_060(addr)))
				continue;
			switch (cache) {
			case FLUSH_CACHE_DATA:
				__asm__ __volatile__ (".chip 68060\n\t"
						      "cpushp %%dc,(%0)\n\t"
						      ".chip 68k"
						      : : "a" (paddr));
				break;
			case FLUSH_CACHE_INSN:
				__asm__ __volatile__ (".chip 68060\n\t"
						      "cpushp %%ic,(%0)\n\t"
						      ".chip 68k"
						      : : "a" (paddr));
				break;
			default:
			case FLUSH_CACHE_BOTH:
				__asm__ __volatile__ (".chip 68060\n\t"
						      "cpushp %%bc,(%0)\n\t"
						      ".chip 68k"
						      : : "a" (paddr));
				break;
			}
		}
		break;
	}
	return 0;
}

/* sys_cacheflush -- flush (part of) the processor cache.  */
asmlinkage int
sys_cacheflush (unsigned long addr, int scope, int cache, unsigned long len)
{
	struct vm_area_struct *vma;
	int ret = -EINVAL;

	lock_kernel();
	if (scope < FLUSH_SCOPE_LINE || scope > FLUSH_SCOPE_ALL ||
	    cache & ~FLUSH_CACHE_BOTH)
		goto out;

	if (scope == FLUSH_SCOPE_ALL) {
		/* Only the superuser may explicitly flush the whole cache. */
		ret = -EPERM;
		if (!capable(CAP_SYS_ADMIN))
			goto out;
	} else {
		/*
		 * Verify that the specified address region actually belongs
		 * to this process.
		 */
		vma = find_vma (current->mm, addr);
		ret = -EINVAL;
		/* Check for overflow.  */
		if (addr + len < addr)
			goto out;
		if (vma == NULL || addr < vma->vm_start || addr + len > vma->vm_end)
			goto out;
	}

	if (CPU_IS_020_OR_030) {
		if (scope == FLUSH_SCOPE_LINE && len < 256) {
			unsigned long cacr;
			__asm__ ("movec %%cacr, %0" : "=r" (cacr));
			if (cache & FLUSH_CACHE_INSN)
				cacr |= 4;
			if (cache & FLUSH_CACHE_DATA)
				cacr |= 0x400;
			len >>= 2;
			while (len--) {
				__asm__ __volatile__ ("movec %1, %%caar\n\t"
						      "movec %0, %%cacr"
						      : /* no outputs */
						      : "r" (cacr), "r" (addr));
				addr += 4;
			}
		} else {
			/* Flush the whole cache, even if page granularity requested. */
			unsigned long cacr;
			__asm__ ("movec %%cacr, %0" : "=r" (cacr));
			if (cache & FLUSH_CACHE_INSN)
				cacr |= 8;
			if (cache & FLUSH_CACHE_DATA)
				cacr |= 0x800;
			__asm__ __volatile__ ("movec %0, %%cacr" : : "r" (cacr));
		}
		ret = 0;
		goto out;
	} else {
		/*
		 * 040 or 060: don't blindly trust 'scope', someone could
		 * try to flush a few megs of memory.
		 */
		if (len >= 3 * PAGE_SIZE && scope < FLUSH_SCOPE_PAGE)
			scope = FLUSH_SCOPE_PAGE;
		if (len >= 10 * PAGE_SIZE && scope < FLUSH_SCOPE_ALL)
			scope = FLUSH_SCOPE_ALL;
		if (CPU_IS_040) {
			ret = cache_flush_040 (addr, scope, cache, len);
		} else if (CPU_IS_060) {
			ret = cache_flush_060 (addr, scope, cache, len);
		}
	}
out:
	unlock_kernel();
	return ret;
}

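/*
 * Illustrative userspace sketch (hypothetical): code generators must
 * push freshly written instructions out of the data cache and
 * invalidate the instruction cache before executing them.  Constants
 * come from <asm/cachectl.h>.
 */
#if 0
int rc = syscall(SYS_cacheflush, (unsigned long)code,
		 FLUSH_SCOPE_LINE, FLUSH_CACHE_BOTH, code_len);
#endif
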
asmlinkage int sys_getpagesize(void)
{
	return PAGE_SIZE;
}

/*
 * Do a system call from kernel instead of calling sys_execve so we
 * end up with proper pt_regs.
 */
int kernel_execve(const char *filename, char *const argv[], char *const envp[])
{
	register long __res asm ("%d0") = __NR_execve;
	register long __a asm ("%d1") = (long)(filename);
	register long __b asm ("%d2") = (long)(argv);
	register long __c asm ("%d3") = (long)(envp);
	asm volatile ("trap #0" : "+d" (__res)
			: "d" (__a), "d" (__b), "d" (__c));
	return __res;
}