/*
 * linux/arch/m68k/kernel/sys_m68k.c
 *
 * This file contains various random system calls that
 * have a non-standard calling sequence on the Linux/m68k
 * platform.
 */

#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/smp.h>
#include <linux/sem.h>
#include <linux/msg.h>
#include <linux/shm.h>
#include <linux/stat.h>
#include <linux/syscalls.h>
#include <linux/mman.h>
#include <linux/file.h>
#include <linux/ipc.h>

#include <asm/setup.h>
#include <asm/uaccess.h>
#include <asm/cachectl.h>
#include <asm/traps.h>
#include <asm/page.h>
#include <asm/unistd.h>
#include <asm/cacheflush.h>
#ifdef CONFIG_MMU

#include <asm/tlb.h>

asmlinkage int do_page_fault(struct pt_regs *regs, unsigned long address,
			     unsigned long error_code);
asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
	unsigned long prot, unsigned long flags,
	unsigned long fd, unsigned long pgoff)
{
	/*
	 * This is wrong for sun3 - there PAGE_SIZE is 8Kb,
	 * so we need to shift the argument down by 1; m68k mmap64(3)
	 * (in libc) expects the last argument of mmap2 in 4Kb units.
	 */
	return sys_mmap_pgoff(addr, len, prot, flags, fd, pgoff);
}
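/*
 * Usage sketch (illustrative, not part of this file): a libc-side
 * mmap64() wrapper is expected to hand this syscall the file offset
 * already converted to 4Kb units, roughly:
 *
 *	// hypothetical wrapper; offset must be a multiple of 4096
 *	void *my_mmap64(void *addr, size_t len, int prot, int flags,
 *			int fd, long long offset)
 *	{
 *		return (void *)syscall(__NR_mmap2, addr, len, prot,
 *				       flags, fd,
 *				       (unsigned long)(offset >> 12));
 *	}
 */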
/* Convert virtual (user) address VADDR to physical address PADDR */
#define virt_to_phys_040(vaddr)						\
({									\
	unsigned long _mmusr, _paddr;					\
									\
	__asm__ __volatile__ (".chip 68040\n\t"				\
			      "ptestr (%1)\n\t"				\
			      "movec %%mmusr,%0\n\t"			\
			      ".chip 68k"				\
			      : "=r" (_mmusr)				\
			      : "a" (vaddr));				\
	/* MMU_R_040 set means the translation is resident. */		\
	_paddr = (_mmusr & MMU_R_040) ? (_mmusr & PAGE_MASK) : 0;	\
	_paddr;								\
})
static inline int
cache_flush_040(unsigned long addr, int scope, int cache, unsigned long len)
{
	unsigned long paddr, i;

	switch (scope) {
	case FLUSH_SCOPE_ALL:
		switch (cache) {
		case FLUSH_CACHE_DATA:
			/* This nop is needed for some broken versions of the 68040. */
			__asm__ __volatile__ ("nop\n\t.chip 68040\n\tcpusha %dc\n\t.chip 68k");
			break;
		case FLUSH_CACHE_INSN:
			__asm__ __volatile__ ("nop\n\t.chip 68040\n\tcpusha %ic\n\t.chip 68k");
			break;
		default:
		case FLUSH_CACHE_BOTH:
			__asm__ __volatile__ ("nop\n\t.chip 68040\n\tcpusha %bc\n\t.chip 68k");
			break;
		}
		break;

	case FLUSH_SCOPE_LINE:
		/* Find the physical address of the first mapped page in the
		   address range. */
		if ((paddr = virt_to_phys_040(addr))) {
			paddr += addr & ~(PAGE_MASK | 15);
			len = (len + (addr & 15) + 15) >> 4;
		} else {
			unsigned long tmp = PAGE_SIZE - (addr & ~PAGE_MASK);

			if (len <= tmp)
				return 0;
			addr += tmp;
			len -= tmp;
			tmp = PAGE_SIZE;
			for (;;) {
				if ((paddr = virt_to_phys_040(addr)))
					break;
				if (len <= tmp)
					return 0;
				addr += tmp;
				len -= tmp;
			}
			/* Number of 16-byte cache lines left to push. */
			len = (len + 15) >> 4;
		}
		i = (PAGE_SIZE - (paddr & ~PAGE_MASK)) >> 4;
		while (len--) {
			switch (cache) {
			case FLUSH_CACHE_DATA:
				__asm__ __volatile__ ("nop\n\t.chip 68040\n\t"
						      "cpushl %%dc,(%0)\n\t.chip 68k"
						      : : "a" (paddr));
				break;
			case FLUSH_CACHE_INSN:
				__asm__ __volatile__ ("nop\n\t.chip 68040\n\t"
						      "cpushl %%ic,(%0)\n\t.chip 68k"
						      : : "a" (paddr));
				break;
			default:
			case FLUSH_CACHE_BOTH:
				__asm__ __volatile__ ("nop\n\t.chip 68040\n\t"
						      "cpushl %%bc,(%0)\n\t.chip 68k"
						      : : "a" (paddr));
				break;
			}
			if (!--i && len) {
				/*
				 * No need to page align here since it is done by
				 * virt_to_phys_040().
				 */
				addr += PAGE_SIZE;
				i = PAGE_SIZE / 16;
				/* Recompute physical address when crossing a page
				   boundary. */
				for (;;) {
					if ((paddr = virt_to_phys_040(addr)))
						break;
					if (len <= i)
						return 0;
					len -= i;
					addr += PAGE_SIZE;
				}
			} else
				paddr += 16;
		}
		break;

	default:
	case FLUSH_SCOPE_PAGE:
		len += (addr & ~PAGE_MASK) + (PAGE_SIZE - 1);
		for (len >>= PAGE_SHIFT; len--; addr += PAGE_SIZE) {
			if (!(paddr = virt_to_phys_040(addr)))
				continue;
			switch (cache) {
			case FLUSH_CACHE_DATA:
				__asm__ __volatile__ ("nop\n\t.chip 68040\n\t"
						      "cpushp %%dc,(%0)\n\t.chip 68k"
						      : : "a" (paddr));
				break;
			case FLUSH_CACHE_INSN:
				__asm__ __volatile__ ("nop\n\t.chip 68040\n\t"
						      "cpushp %%ic,(%0)\n\t.chip 68k"
						      : : "a" (paddr));
				break;
			default:
			case FLUSH_CACHE_BOTH:
				__asm__ __volatile__ ("nop\n\t.chip 68040\n\t"
						      "cpushp %%bc,(%0)\n\t.chip 68k"
						      : : "a" (paddr));
				break;
			}
		}
		break;
	}
	return 0;
}
/* Convert virtual (user) address VADDR to physical address PADDR,
   using the 68060 plpar (load physical address) instruction. */
#define virt_to_phys_060(vaddr)					\
({								\
	unsigned long paddr;					\
	__asm__ __volatile__ (".chip 68060\n\t"			\
			      "plpar (%0)\n\t"			\
			      ".chip 68k"			\
			      : "=a" (paddr)			\
			      : "0" (vaddr));			\
	(paddr);						\
})
static inline int
cache_flush_060(unsigned long addr, int scope, int cache, unsigned long len)
{
	unsigned long paddr, i;

	/*
	 * 68060 manual says:
	 *  cpush %dc : flush DC, remains valid (with our %cacr setup)
	 *  cpush %ic : invalidate IC
	 *  cpush %bc : flush DC + invalidate IC
	 */
	switch (scope) {
	case FLUSH_SCOPE_ALL:
		switch (cache) {
		case FLUSH_CACHE_DATA:
			__asm__ __volatile__ (".chip 68060\n\tcpusha %dc\n\t.chip 68k");
			break;
		case FLUSH_CACHE_INSN:
			__asm__ __volatile__ (".chip 68060\n\tcpusha %ic\n\t.chip 68k");
			break;
		default:
		case FLUSH_CACHE_BOTH:
			__asm__ __volatile__ (".chip 68060\n\tcpusha %bc\n\t.chip 68k");
			break;
		}
		break;

	case FLUSH_SCOPE_LINE:
		/* Find the physical address of the first mapped page in the
		   address range. */
		len += addr & 15;
		addr &= -16;
		if (!(paddr = virt_to_phys_060(addr))) {
			unsigned long tmp = PAGE_SIZE - (addr & ~PAGE_MASK);

			if (len <= tmp)
				return 0;
			addr += tmp;
			len -= tmp;
			tmp = PAGE_SIZE;
			for (;;) {
				if ((paddr = virt_to_phys_060(addr)))
					break;
				if (len <= tmp)
					return 0;
				addr += tmp;
				len -= tmp;
			}
		}
		/* Number of 16-byte cache lines left to push. */
		len = (len + 15) >> 4;
		i = (PAGE_SIZE - (paddr & ~PAGE_MASK)) >> 4;
		while (len--) {
			switch (cache) {
			case FLUSH_CACHE_DATA:
				__asm__ __volatile__ (".chip 68060\n\t"
						      "cpushl %%dc,(%0)\n\t.chip 68k"
						      : : "a" (paddr));
				break;
			case FLUSH_CACHE_INSN:
				__asm__ __volatile__ (".chip 68060\n\t"
						      "cpushl %%ic,(%0)\n\t.chip 68k"
						      : : "a" (paddr));
				break;
			default:
			case FLUSH_CACHE_BOTH:
				__asm__ __volatile__ (".chip 68060\n\t"
						      "cpushl %%bc,(%0)\n\t.chip 68k"
						      : : "a" (paddr));
				break;
			}
			if (!--i && len) {
				/*
				 * We just want to jump to the first cache line
				 * in the next page.
				 */
				addr += PAGE_SIZE;
				addr &= PAGE_MASK;

				i = PAGE_SIZE / 16;
				/* Recompute physical address when crossing a page
				   boundary. */
				for (;;) {
					if ((paddr = virt_to_phys_060(addr)))
						break;
					if (len <= i)
						return 0;
					len -= i;
					addr += PAGE_SIZE;
				}
			} else
				paddr += 16;
		}
		break;

	default:
	case FLUSH_SCOPE_PAGE:
		len += (addr & ~PAGE_MASK) + (PAGE_SIZE - 1);
		addr &= PAGE_MASK;	/* Workaround for bug in some
					   revisions of the 68060 */
		for (len >>= PAGE_SHIFT; len--; addr += PAGE_SIZE) {
			if (!(paddr = virt_to_phys_060(addr)))
				continue;
			switch (cache) {
			case FLUSH_CACHE_DATA:
				__asm__ __volatile__ (".chip 68060\n\t"
						      "cpushp %%dc,(%0)\n\t.chip 68k"
						      : : "a" (paddr));
				break;
			case FLUSH_CACHE_INSN:
				__asm__ __volatile__ (".chip 68060\n\t"
						      "cpushp %%ic,(%0)\n\t.chip 68k"
						      : : "a" (paddr));
				break;
			default:
			case FLUSH_CACHE_BOTH:
				__asm__ __volatile__ (".chip 68060\n\t"
						      "cpushp %%bc,(%0)\n\t.chip 68k"
						      : : "a" (paddr));
				break;
			}
		}
		break;
	}
	return 0;
}
/* sys_cacheflush -- flush (part of) the processor cache. */
asmlinkage int
sys_cacheflush(unsigned long addr, int scope, int cache, unsigned long len)
{
	struct vm_area_struct *vma;
	int ret = -EINVAL;

	if (scope < FLUSH_SCOPE_LINE || scope > FLUSH_SCOPE_ALL ||
	    cache & ~FLUSH_CACHE_BOTH)
		goto out;

	if (scope == FLUSH_SCOPE_ALL) {
		/* Only the superuser may explicitly flush the whole cache. */
		ret = -EPERM;
		if (!capable(CAP_SYS_ADMIN))
			goto out;
	} else {
		/*
		 * Verify that the specified address region actually belongs
		 * to this process.
		 */
		vma = find_vma(current->mm, addr);
		ret = -EINVAL;
		/* Check for overflow. */
		if (addr + len < addr)
			goto out;
		if (vma == NULL || addr < vma->vm_start || addr + len > vma->vm_end)
			goto out;
	}

	if (CPU_IS_020_OR_030) {
		if (scope == FLUSH_SCOPE_LINE && len < 256) {
			unsigned long cacr;
			__asm__ ("movec %%cacr, %0" : "=r" (cacr));
			if (cache & FLUSH_CACHE_INSN)
				cacr |= 4;
			if (cache & FLUSH_CACHE_DATA)
				cacr |= 0x400;
			len >>= 2;
			while (len--) {
				/* Clear one cache entry via %caar, one
				   longword at a time. */
				__asm__ __volatile__ ("movec %1, %%caar\n\t"
						      "movec %0, %%cacr"
						      : /* no outputs */
						      : "r" (cacr), "r" (addr));
				addr += 4;
			}
		} else {
			/* Flush the whole cache, even if page granularity requested. */
			unsigned long cacr;
			__asm__ ("movec %%cacr, %0" : "=r" (cacr));
			if (cache & FLUSH_CACHE_INSN)
				cacr |= 8;
			if (cache & FLUSH_CACHE_DATA)
				cacr |= 0x800;
			__asm__ __volatile__ ("movec %0, %%cacr" : : "r" (cacr));
		}
		ret = 0;
		goto out;
	} else {
		/*
		 * 040 or 060: don't blindly trust 'scope', someone could
		 * try to flush a few megs of memory.
		 */
		if (len >= 3 * PAGE_SIZE && scope < FLUSH_SCOPE_PAGE)
			scope = FLUSH_SCOPE_PAGE;
		if (len >= 10 * PAGE_SIZE && scope < FLUSH_SCOPE_ALL)
			scope = FLUSH_SCOPE_ALL;
		if (CPU_IS_040) {
			ret = cache_flush_040(addr, scope, cache, len);
		} else if (CPU_IS_060) {
			ret = cache_flush_060(addr, scope, cache, len);
		}
	}
out:
	return ret;
}
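/*
 * Usage sketch (illustrative, not part of this file): a userspace JIT
 * that has just written instructions into a buffer would invoke this
 * through the cacheflush syscall with the constants from
 * <asm/cachectl.h>; code_buf and code_len are placeholders:
 *
 *	if (syscall(__NR_cacheflush, (unsigned long)code_buf,
 *		    FLUSH_SCOPE_LINE, FLUSH_CACHE_BOTH, code_len) != 0)
 *		perror("cacheflush");
 */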
/* This syscall gets its arguments in A0 (mem), D2 (oldval) and
   D1 (newval). */
asmlinkage int
sys_atomic_cmpxchg_32(unsigned long newval, int oldval, int d3, int d4, int d5,
		      unsigned long __user * mem)
{
	/* This was borrowed from ARM's implementation. */
	for (;;) {
		struct mm_struct *mm = current->mm;
		pgd_t *pgd;
		pmd_t *pmd;
		pte_t *pte;
		spinlock_t *ptl;
		unsigned long mem_value;

		down_read(&mm->mmap_sem);
		pgd = pgd_offset(mm, (unsigned long)mem);
		if (!pgd_present(*pgd))
			goto bad_access;
		pmd = pmd_offset(pgd, (unsigned long)mem);
		if (!pmd_present(*pmd))
			goto bad_access;
		pte = pte_offset_map_lock(mm, pmd, (unsigned long)mem, &ptl);
		if (!pte_present(*pte) || !pte_dirty(*pte)
		    || !pte_write(*pte)) {
			pte_unmap_unlock(pte, ptl);
			goto bad_access;
		}

		/*
		 * No need to check for EFAULT; we know that the page is
		 * present and writable.
		 */
		__get_user(mem_value, mem);
		if (mem_value == oldval)
			__put_user(newval, mem);

		pte_unmap_unlock(pte, ptl);
		up_read(&mm->mmap_sem);
		return mem_value;

bad_access:
		up_read(&mm->mmap_sem);
		/* This is not necessarily a bad access, we can get here if
		   a memory we're trying to write to should be copied-on-write.
		   Make the kernel do the necessary page stuff, then re-iterate.
		   Simulate a write access fault to do that. */
		{
			/* The first argument of the function corresponds to
			   D1, which is the first field of struct pt_regs. */
			struct pt_regs *fp = (struct pt_regs *)&newval;

			/* '3' is an RMW flag. */
			if (do_page_fault(fp, (unsigned long)mem, 3))
				/* If the do_page_fault() failed, we don't
				   have anything meaningful to return.
				   There should be a SIGSEGV pending for
				   the process. */
				return 0xdeadbeef;
		}
	}
}
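/*
 * Calling-convention sketch (illustrative, not part of this file):
 * because the operands arrive in fixed registers rather than on the
 * stack, a userspace wrapper needs a small asm stub, roughly
 * (the __NR_atomic_cmpxchg_32 number is assumed to come from
 * <asm/unistd.h>):
 *
 *	register long d0 asm ("%d0") = __NR_atomic_cmpxchg_32;
 *	register long d1 asm ("%d1") = newval;
 *	register long d2 asm ("%d2") = oldval;
 *	register volatile int *a0 asm ("%a0") = mem;
 *	asm volatile ("trap #0" : "+d" (d0)
 *		      : "d" (d1), "d" (d2), "a" (a0) : "memory");
 *	// d0 now holds the value read from *mem; it equals oldval
 *	// iff the exchange succeeded
 */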
#else

/* sys_cacheflush -- flush (part of) the processor cache. */
asmlinkage int
sys_cacheflush(unsigned long addr, int scope, int cache, unsigned long len)
{
	flush_cache_all();
	return 0;
}

/* This syscall gets its arguments in A0 (mem), D2 (oldval) and
   D1 (newval). */
asmlinkage int
sys_atomic_cmpxchg_32(unsigned long newval, int oldval, int d3, int d4, int d5,
		      unsigned long __user * mem)
{
	struct mm_struct *mm = current->mm;
	unsigned long mem_value;

	down_read(&mm->mmap_sem);

	mem_value = *mem;
	if (mem_value == oldval)
		*mem = newval;

	up_read(&mm->mmap_sem);
	return mem_value;
}
#endif /* CONFIG_MMU */
asmlinkage int sys_getpagesize(void)
{
	return PAGE_SIZE;
}
/*
 * Do a system call from kernel instead of calling sys_execve so we
 * end up with proper pt_regs.
 */
int kernel_execve(const char *filename,
		  const char *const argv[],
		  const char *const envp[])
{
	register long __res asm ("%d0") = __NR_execve;
	register long __a asm ("%d1") = (long)(filename);
	register long __b asm ("%d2") = (long)(argv);
	register long __c asm ("%d3") = (long)(envp);
	asm volatile ("trap #0" : "+d" (__res)
			: "d" (__a), "d" (__b), "d" (__c));
	return __res;
}
asmlinkage unsigned long sys_get_thread_area(void)
{
	return current_thread_info()->tp_value;
}

asmlinkage int sys_set_thread_area(unsigned long tp)
{
	current_thread_info()->tp_value = tp;
	return 0;
}
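/*
 * Usage sketch (illustrative, not part of this file): a threading
 * library would store its per-thread control block address here once
 * per thread and read it back when it needs TLS, assuming the usual
 * __NR_* numbers from <asm/unistd.h>; tcb is a placeholder:
 *
 *	syscall(__NR_set_thread_area, (unsigned long)tcb);
 *	void *tp = (void *)syscall(__NR_get_thread_area);
 */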
asmlinkage int sys_atomic_barrier(void)
{
	/* no code needed for uniprocs */
	return 0;
}