/*
 * Blackfin architecture-dependent process handling
 *
 * Copyright 2004-2009 Analog Devices Inc.
 *
 * Licensed under the GPL-2 or later
 */

#include <linux/module.h>
#include <linux/unistd.h>
#include <linux/user.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/tick.h>
#include <linux/fs.h>
#include <linux/err.h>

#include <asm/blackfin.h>
#include <asm/fixed_code.h>
#include <asm/mem_map.h>

asmlinkage void ret_from_fork(void);

/* Points to the SDRAM backup memory for the stack that is currently in
 * L1 scratchpad memory.
 */
void *current_l1_stack_save;

/* The number of tasks currently using a L1 stack area.  The SRAM is
 * allocated/deallocated whenever this changes from/to zero.
 */
int nr_l1stack_tasks;

/* Start and length of the area in L1 scratchpad memory which we've allocated
 * for process stacks.
 */
void *l1_stack_base;
unsigned long l1_stack_len;

/*
 * Powermanagement idle function, if any..
 */
void (*pm_idle)(void) = NULL;
EXPORT_SYMBOL(pm_idle);

void (*pm_power_off)(void) = NULL;
EXPORT_SYMBOL(pm_power_off);

/*
 * The idle loop on BFIN
 */
#ifdef CONFIG_IDLE_L1
static void default_idle(void)__attribute__((l1_text));
void cpu_idle(void)__attribute__((l1_text));
#endif

/*
 * This is our default idle handler.  We need to disable
 * interrupts here to ensure we don't miss a wakeup call.
 */
static void default_idle(void)
{
#ifdef CONFIG_IPIPE
        ipipe_suspend_domain();
#endif
        hard_local_irq_disable();
        if (!need_resched())
                idle_with_irq_disabled();

        hard_local_irq_enable();
}

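/*
 * Why the interrupt dance above matters: with interrupts enabled, a
 * wakeup could slip in between the need_resched() check and the IDLE
 * instruction and put the core to sleep with work pending.
 * idle_with_irq_disabled() re-enables interrupts and issues IDLE as one
 * back-to-back sequence, so a wakeup either arrives before the check or
 * pulls the core straight out of IDLE.
 */
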
/*
 * The idle thread.  We try to conserve power, while trying to keep
 * overall latency low.  The architecture specific idle is passed
 * a value to indicate the level of "idleness" of the system.
 */
void cpu_idle(void)
{
        /* endless idle loop with no priority at all */
        while (1) {
                void (*idle)(void) = pm_idle;

#ifdef CONFIG_HOTPLUG_CPU
                if (cpu_is_offline(smp_processor_id()))
                        cpu_die();
#endif
                if (!idle)
                        idle = default_idle;
                tick_nohz_idle_enter();
                rcu_idle_enter();
                while (!need_resched())
                        idle();
                rcu_idle_exit();
                tick_nohz_idle_exit();
                preempt_enable_no_resched();
                schedule();
                preempt_disable();
        }
}

/*
 * This gets run with P1 containing the
 * function to call, and R1 containing
 * the "args".  Note P0 is clobbered on the way here.
 */
void kernel_thread_helper(void);
__asm__(".section .text\n"
        ".align 4\n"
        "_kernel_thread_helper:\n\t"
        "\tsp += -12;\n\t"
        "\tr0 = r1;\n\t" "\tcall (p1);\n\t" "\tcall _do_exit;\n" ".previous");

/*
 * Create a kernel thread.
 */
pid_t kernel_thread(int (*fn) (void *), void *arg, unsigned long flags)
{
        struct pt_regs regs;

        memset(&regs, 0, sizeof(regs));

        regs.r1 = (unsigned long)arg;
        regs.p1 = (unsigned long)fn;
        regs.pc = (unsigned long)kernel_thread_helper;
        regs.orig_p0 = -1;
        /* Set bit 2 to tell ret_from_fork we should be returning to kernel
           mode.  */
        regs.ipend = 0x8002;
        __asm__ __volatile__("%0 = syscfg;":"=da"(regs.syscfg):);
        return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, &regs, 0, NULL,
                       NULL);
}
EXPORT_SYMBOL(kernel_thread);

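/*
 * Usage sketch (hypothetical caller; my_worker, my_data and do_some_work
 * are made-up names).  The new thread shares the kernel address space via
 * CLONE_VM and starts in kernel_thread_helper, which moves the argument
 * into R0, calls the function through P1, and hands its return value to
 * do_exit() when it returns:
 *
 *      static int my_worker(void *data)
 *      {
 *              do_some_work(data);
 *              return 0;
 *      }
 *
 *      pid_t pid = kernel_thread(my_worker, my_data, 0);
 */
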
/*
 * Do necessary setup to start up a newly executed thread.
 *
 * pass the data segment into user programs if it exists,
 * it can't hurt anything as far as I can tell
 */
void start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
{
        regs->pc = new_ip;
        if (current->mm)
                regs->p5 = current->mm->start_data;
#ifndef CONFIG_SMP
        task_thread_info(current)->l1_task_info.stack_start =
                (void *)current->mm->context.stack_start;
        task_thread_info(current)->l1_task_info.lowest_sp = (void *)new_sp;
        memcpy(L1_SCRATCH_TASK_INFO, &task_thread_info(current)->l1_task_info,
               sizeof(*L1_SCRATCH_TASK_INFO));
#endif
        wrusp(new_sp);
}
EXPORT_SYMBOL_GPL(start_thread);

void flush_thread(void)
{
}

asmlinkage int bfin_vfork(struct pt_regs *regs)
{
        return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, rdusp(), regs, 0, NULL,
                       NULL);
}

asmlinkage int bfin_clone(struct pt_regs *regs)
{
        unsigned long clone_flags;
        unsigned long newsp;

#ifdef __ARCH_SYNC_CORE_DCACHE
        if (current->rt.nr_cpus_allowed == num_possible_cpus())
                set_cpus_allowed_ptr(current, cpumask_of(smp_processor_id()));
#endif

        /* syscall2 puts clone_flags in r0 and usp in r1 */
        clone_flags = regs->r0;
        newsp = regs->r1;
        if (!newsp)
                newsp = rdusp();
        else
                newsp -= 12;
        return do_fork(clone_flags, newsp, regs, 0, NULL, NULL);
}

int
copy_thread(unsigned long clone_flags,
            unsigned long usp, unsigned long topstk,
            struct task_struct *p, struct pt_regs *regs)
{
        struct pt_regs *childregs;

        childregs = (struct pt_regs *) (task_stack_page(p) + THREAD_SIZE) - 1;
        *childregs = *regs;
        childregs->r0 = 0;

        p->thread.usp = usp;
        p->thread.ksp = (unsigned long)childregs;
        p->thread.pc = (unsigned long)ret_from_fork;

        return 0;
}

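/*
 * Note on the child's first run: switch_to() loads thread.ksp/thread.pc,
 * so the child resumes at ret_from_fork with its pt_regs at the top of
 * its kernel stack.  The childregs->r0 = 0 above is what makes
 * fork()/clone() return 0 in the child, while the parent receives the
 * child's pid from do_fork().
 */
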
/*
 * sys_execve() executes a new program.
 */
asmlinkage int sys_execve(const char __user *name,
                          const char __user *const __user *argv,
                          const char __user *const __user *envp)
{
        int error;
        char *filename;
        /* the syscall stub saves pt_regs six slots above the first argument */
        struct pt_regs *regs = (struct pt_regs *)((&name) + 6);

        filename = getname(name);
        error = PTR_ERR(filename);
        if (IS_ERR(filename))
                return error;
        error = do_execve(filename, argv, envp, regs);
        putname(filename);
        return error;
}

unsigned long get_wchan(struct task_struct *p)
{
        unsigned long fp, pc;
        unsigned long stack_page;
        int count = 0;
        if (!p || p == current || p->state == TASK_RUNNING)
                return 0;

        stack_page = (unsigned long)p;
        fp = p->thread.usp;
        do {
                if (fp < stack_page + sizeof(struct thread_info) ||
                    fp >= 8184 + stack_page)        /* 8184 = THREAD_SIZE - 8 */
                        return 0;
                pc = ((unsigned long *)fp)[1];
                if (!in_sched_functions(pc))
                        return pc;
                fp = *(unsigned long *)fp;
        }
        while (count++ < 16);
        return 0;
}

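/*
 * Background for the fixup below: userspace Blackfin has no atomic
 * instructions, so pseudo-atomic sequences live at fixed addresses (the
 * ATOMIC_* constants from <asm/fixed_code.h>) and are entered with the
 * target pointer in P0 and operands in R0/R1/R2.  If an interrupt lands
 * while the PC is inside such a sequence, the kernel completes the
 * remaining steps on the task's behalf.  Illustrative shape of one
 * sequence (16-bit instructions, hence the "+ 2"/"+ 4" PC offsets):
 *
 *      ATOMIC_ADD32:           r0 = [p0];
 *      ATOMIC_ADD32 + 2:       r0 = r0 + r1;
 *      ATOMIC_ADD32 + 4:       [p0] = r0;
 *      ATOMIC_ADD32 + 6:       rts;
 */
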
void finish_atomic_sections (struct pt_regs *regs)
{
        int __user *up0 = (int __user *)regs->p0;

        switch (regs->pc) {
        default:
                /* not in middle of an atomic step, so resume like normal */
                return;

        case ATOMIC_XCHG32 + 2:
                put_user(regs->r1, up0);
                break;

        case ATOMIC_CAS32 + 2:
        case ATOMIC_CAS32 + 4:
                if (regs->r0 == regs->r1)
        case ATOMIC_CAS32 + 6:  /* compare already passed; store unconditionally */
                        put_user(regs->r2, up0);
                break;

        case ATOMIC_ADD32 + 2:
                regs->r0 = regs->r1 + regs->r0;
                /* fall through */
        case ATOMIC_ADD32 + 4:
                put_user(regs->r0, up0);
                break;

        case ATOMIC_SUB32 + 2:
                regs->r0 = regs->r1 - regs->r0;
                /* fall through */
        case ATOMIC_SUB32 + 4:
                put_user(regs->r0, up0);
                break;

        case ATOMIC_IOR32 + 2:
                regs->r0 = regs->r1 | regs->r0;
                /* fall through */
        case ATOMIC_IOR32 + 4:
                put_user(regs->r0, up0);
                break;

        case ATOMIC_AND32 + 2:
                regs->r0 = regs->r1 & regs->r0;
                /* fall through */
        case ATOMIC_AND32 + 4:
                put_user(regs->r0, up0);
                break;

        case ATOMIC_XOR32 + 2:
                regs->r0 = regs->r1 ^ regs->r0;
                /* fall through */
        case ATOMIC_XOR32 + 4:
                put_user(regs->r0, up0);
                break;
        }

        /*
         * We've finished the atomic section, and the only thing left for
         * userspace is to do a RTS, so we might as well handle that too
         * since we need to update the PC anyways.
         */
        regs->pc = regs->rets;
}

static inline
int in_mem(unsigned long addr, unsigned long size,
           unsigned long start, unsigned long end)
{
        return addr >= start && addr + size <= end;
}
static inline
int in_mem_const_off(unsigned long addr, unsigned long size, unsigned long off,
                     unsigned long const_addr, unsigned long const_size)
{
        return const_size &&
               in_mem(addr, size, const_addr + off, const_addr + const_size);
}
static inline
int in_mem_const(unsigned long addr, unsigned long size,
                 unsigned long const_addr, unsigned long const_size)
{
        return in_mem_const_off(addr, size, 0, const_addr, const_size);
}

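/*
 * Semantics of the helpers above, by example: in_mem_const_off(addr,
 * size, off, base, len) accepts only ranges falling entirely inside
 * [base + off, base + len), i.e. the tail of a region after skipping a
 * prefix (used below to skip the parts of L1/L2 occupied by kernel
 * text/data), and rejects everything when len is zero, which covers
 * memories that do not exist on a given derivative.
 */
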
#define ASYNC_ENABLED(bnum, bctlnum) \
({ \
        (bfin_read_EBIU_AMGCTL() & 0xe) < ((bnum + 1) << 1) ? 0 : \
        bfin_read_EBIU_AMBCTL##bctlnum() & B##bnum##RDYEN ? 0 : \
        1; \
})

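/*
 * Reading the macro above: the AMBEN field of EBIU_AMGCTL (the bits
 * behind the 0xe mask) encodes how many async banks are enabled, so
 * bank bnum is usable only if that count exceeds bnum.  A bank whose
 * BxRDYEN bit is set in EBIU_AMBCTL0/1 is rejected as well, since a
 * read could stall indefinitely waiting on the external ARDY pin.
 */
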
/*
 * We can't read EBIU banks that aren't enabled or we end up hanging
 * on the access to the async space.  Make sure we validate accesses
 * that cross async banks too.
 *      0 - found, but unusable
 *      1 - found & usable
 *      2 - not found
 */
static int
in_async(unsigned long addr, unsigned long size)
{
        if (addr >= ASYNC_BANK0_BASE && addr < ASYNC_BANK0_BASE + ASYNC_BANK0_SIZE) {
                if (!ASYNC_ENABLED(0, 0))
                        return 0;
                if (addr + size <= ASYNC_BANK0_BASE + ASYNC_BANK0_SIZE)
                        return 1;
                size -= ASYNC_BANK0_BASE + ASYNC_BANK0_SIZE - addr;
                addr = ASYNC_BANK0_BASE + ASYNC_BANK0_SIZE;
        }
        if (addr >= ASYNC_BANK1_BASE && addr < ASYNC_BANK1_BASE + ASYNC_BANK1_SIZE) {
                if (!ASYNC_ENABLED(1, 0))
                        return 0;
                if (addr + size <= ASYNC_BANK1_BASE + ASYNC_BANK1_SIZE)
                        return 1;
                size -= ASYNC_BANK1_BASE + ASYNC_BANK1_SIZE - addr;
                addr = ASYNC_BANK1_BASE + ASYNC_BANK1_SIZE;
        }
        if (addr >= ASYNC_BANK2_BASE && addr < ASYNC_BANK2_BASE + ASYNC_BANK2_SIZE) {
                if (!ASYNC_ENABLED(2, 1))
                        return 0;
                if (addr + size <= ASYNC_BANK2_BASE + ASYNC_BANK2_SIZE)
                        return 1;
                size -= ASYNC_BANK2_BASE + ASYNC_BANK2_SIZE - addr;
                addr = ASYNC_BANK2_BASE + ASYNC_BANK2_SIZE;
        }
        if (addr >= ASYNC_BANK3_BASE && addr < ASYNC_BANK3_BASE + ASYNC_BANK3_SIZE) {
                if (!ASYNC_ENABLED(3, 1))
                        return 0;
                if (addr + size <= ASYNC_BANK3_BASE + ASYNC_BANK3_SIZE)
                        return 1;
                return 0;
        }

        /* not within async bounds */
        return 2;
}

int bfin_mem_access_type(unsigned long addr, unsigned long size)
{
        int cpu = raw_smp_processor_id();

        /* Check that things do not wrap around */
        if (addr > ULONG_MAX - size)
                return -EFAULT;

        if (in_mem(addr, size, FIXED_CODE_START, physical_mem_end))
                return BFIN_MEM_ACCESS_CORE;

        if (in_mem_const(addr, size, L1_CODE_START, L1_CODE_LENGTH))
                return cpu == 0 ? BFIN_MEM_ACCESS_ITEST : BFIN_MEM_ACCESS_IDMA;
        if (in_mem_const(addr, size, L1_SCRATCH_START, L1_SCRATCH_LENGTH))
                return cpu == 0 ? BFIN_MEM_ACCESS_CORE_ONLY : -EFAULT;
        if (in_mem_const(addr, size, L1_DATA_A_START, L1_DATA_A_LENGTH))
                return cpu == 0 ? BFIN_MEM_ACCESS_CORE : BFIN_MEM_ACCESS_IDMA;
        if (in_mem_const(addr, size, L1_DATA_B_START, L1_DATA_B_LENGTH))
                return cpu == 0 ? BFIN_MEM_ACCESS_CORE : BFIN_MEM_ACCESS_IDMA;
#ifdef COREB_L1_CODE_START
        if (in_mem_const(addr, size, COREB_L1_CODE_START, COREB_L1_CODE_LENGTH))
                return cpu == 1 ? BFIN_MEM_ACCESS_ITEST : BFIN_MEM_ACCESS_IDMA;
        if (in_mem_const(addr, size, COREB_L1_SCRATCH_START, L1_SCRATCH_LENGTH))
                return cpu == 1 ? BFIN_MEM_ACCESS_CORE_ONLY : -EFAULT;
        if (in_mem_const(addr, size, COREB_L1_DATA_A_START, COREB_L1_DATA_A_LENGTH))
                return cpu == 1 ? BFIN_MEM_ACCESS_CORE : BFIN_MEM_ACCESS_IDMA;
        if (in_mem_const(addr, size, COREB_L1_DATA_B_START, COREB_L1_DATA_B_LENGTH))
                return cpu == 1 ? BFIN_MEM_ACCESS_CORE : BFIN_MEM_ACCESS_IDMA;
#endif
        if (in_mem_const(addr, size, L2_START, L2_LENGTH))
                return BFIN_MEM_ACCESS_CORE;

        if (addr >= SYSMMR_BASE)
                return BFIN_MEM_ACCESS_CORE_ONLY;

        switch (in_async(addr, size)) {
        case 0: return -EFAULT;
        case 1: return BFIN_MEM_ACCESS_CORE;
        case 2: /* fall through */;
        }

        if (in_mem_const(addr, size, BOOT_ROM_START, BOOT_ROM_LENGTH))
                return BFIN_MEM_ACCESS_CORE;
        if (in_mem_const(addr, size, L1_ROM_START, L1_ROM_LENGTH))
                return BFIN_MEM_ACCESS_DMA;

        return -EFAULT;
}

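/*
 * Usage sketch (hypothetical caller, along the lines of how the ptrace
 * code consumes these classifications): pick an access method based on
 * the return value instead of blindly dereferencing; dma_memcpy() and
 * isram_memcpy() are the usual helpers for the DMA and ITEST cases.
 *
 *      long val;
 *      switch (bfin_mem_access_type(addr, sizeof(val))) {
 *      case BFIN_MEM_ACCESS_CORE:
 *      case BFIN_MEM_ACCESS_CORE_ONLY:
 *              val = *(long *)addr;
 *              break;
 *      case BFIN_MEM_ACCESS_DMA:
 *              dma_memcpy(&val, (void *)addr, sizeof(val));
 *              break;
 *      case BFIN_MEM_ACCESS_ITEST:
 *              isram_memcpy(&val, (void *)addr, sizeof(val));
 *              break;
 *      default:
 *              return -EIO;    // -EFAULT or a type not handled here
 *      }
 */
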
#if defined(CONFIG_ACCESS_CHECK)
#ifdef CONFIG_ACCESS_OK_L1
__attribute__((l1_text))
#endif
/* Return 1 if access to memory range is OK, 0 otherwise */
int _access_ok(unsigned long addr, unsigned long size)
{
        int aret;

        if (size == 0)
                return 1;
        /* Check that things do not wrap around */
        if (addr > ULONG_MAX - size)
                return 0;
        if (segment_eq(get_fs(), KERNEL_DS))
                return 1;
#ifdef CONFIG_MTD_UCLINUX
        if (1)
#else
        if (0)
#endif
        {
                if (in_mem(addr, size, memory_start, memory_end))
                        return 1;
                if (in_mem(addr, size, memory_mtd_end, physical_mem_end))
                        return 1;
# ifndef CONFIG_ROMFS_ON_MTD
                if (0)
# endif
                        /* For XIP, allow user space to use pointers within the ROMFS. */
                        if (in_mem(addr, size, memory_mtd_start, memory_mtd_end))
                                return 1;
        } else {
                if (in_mem(addr, size, memory_start, physical_mem_end))
                        return 1;
        }

        if (in_mem(addr, size, (unsigned long)__init_begin, (unsigned long)__init_end))
                return 1;

        if (in_mem_const(addr, size, L1_CODE_START, L1_CODE_LENGTH))
                return 1;
        if (in_mem_const_off(addr, size, _etext_l1 - _stext_l1, L1_CODE_START, L1_CODE_LENGTH))
                return 1;
        if (in_mem_const_off(addr, size, _ebss_l1 - _sdata_l1, L1_DATA_A_START, L1_DATA_A_LENGTH))
                return 1;
        if (in_mem_const_off(addr, size, _ebss_b_l1 - _sdata_b_l1, L1_DATA_B_START, L1_DATA_B_LENGTH))
                return 1;
#ifdef COREB_L1_CODE_START
        if (in_mem_const(addr, size, COREB_L1_CODE_START, COREB_L1_CODE_LENGTH))
                return 1;
        if (in_mem_const(addr, size, COREB_L1_SCRATCH_START, L1_SCRATCH_LENGTH))
                return 1;
        if (in_mem_const(addr, size, COREB_L1_DATA_A_START, COREB_L1_DATA_A_LENGTH))
                return 1;
        if (in_mem_const(addr, size, COREB_L1_DATA_B_START, COREB_L1_DATA_B_LENGTH))
                return 1;
#endif

#ifndef CONFIG_EXCEPTION_L1_SCRATCH
        if (in_mem_const(addr, size, (unsigned long)l1_stack_base, l1_stack_len))
                return 1;
#endif

        aret = in_async(addr, size);
        if (aret < 2)
                return aret;

        if (in_mem_const_off(addr, size, _ebss_l2 - _stext_l2, L2_START, L2_LENGTH))
                return 1;

        if (in_mem_const(addr, size, BOOT_ROM_START, BOOT_ROM_LENGTH))
                return 1;
        if (in_mem_const(addr, size, L1_ROM_START, L1_ROM_LENGTH))
                return 1;

        return 0;
}
EXPORT_SYMBOL(_access_ok);
#endif /* CONFIG_ACCESS_CHECK */