/*
 * File:         arch/blackfin/kernel/process.c
 * Based on:
 * Author:
 *
 * Created:
 * Description:  Blackfin architecture-dependent process handling.
 *
 * Modified:
 *               Copyright 2004-2006 Analog Devices Inc.
 *
 * Bugs:         Enter bugs at http://blackfin.uclinux.org/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see the file COPYING, or write
 * to the Free Software Foundation, Inc.,
 * 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */

#include <linux/module.h>
#include <linux/smp_lock.h>
#include <linux/unistd.h>
#include <linux/user.h>
#include <linux/uaccess.h>
#include <linux/sched.h>
#include <linux/tick.h>
#include <linux/fs.h>
#include <linux/err.h>

#include <asm/blackfin.h>
#include <asm/fixed_code.h>

asmlinkage void ret_from_fork(void);

/* Points to the SDRAM backup memory for the stack that is currently in
 * L1 scratchpad memory.
 */
void *current_l1_stack_save;

/* The number of tasks currently using an L1 stack area.  The SRAM is
 * allocated/deallocated whenever this changes from/to zero.
 */
int nr_l1stack_tasks;

/* Start and length of the area in L1 scratchpad memory which we've allocated
 * for process stacks.
 */
void *l1_stack_base;
unsigned long l1_stack_len;

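/* Sketch of how these variables are expected to cooperate (the actual swap
 * lives in the context-switch path): only one task's stack can occupy L1
 * scratchpad at a time, so switching away means saving the L1 contents back
 * to the SDRAM backup, and switching in means copying the new task's stack
 * into L1.  Roughly (next_stack_sdram is an invented name):
 *
 *      if (current_l1_stack_save)
 *              memcpy(current_l1_stack_save, l1_stack_base, l1_stack_len);
 *      current_l1_stack_save = next_stack_sdram;
 *      memcpy(l1_stack_base, current_l1_stack_save, l1_stack_len);
 */
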
/*
 * Power management idle function, if any.
 */
void (*pm_idle)(void) = NULL;
EXPORT_SYMBOL(pm_idle);

void (*pm_power_off)(void) = NULL;
EXPORT_SYMBOL(pm_power_off);

/*
 * The idle loop on BFIN
 */
#ifdef CONFIG_IDLE_L1
static void default_idle(void) __attribute__((l1_text));
void cpu_idle(void) __attribute__((l1_text));
#endif

/*
 * This is our default idle handler.  We need to disable
 * interrupts here to ensure we don't miss a wakeup call.
 */
static void default_idle(void)
{
        local_irq_disable();
        if (!need_resched())
                idle_with_irq_disabled();

        local_irq_enable();
}

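/* Why interrupts are disabled across the need_resched() test: with
 * interrupts enabled, a wakeup could arrive between the test and the idle
 * instruction, set TIF_NEED_RESCHED, and the core would then go to sleep
 * having already consumed its wakeup.  idle_with_irq_disabled() is expected
 * to enter IDLE such that a pending interrupt still wakes the core, closing
 * that window.  The racy variant being avoided looks like:
 *
 *      if (!need_resched())    // interrupt may fire here ...
 *              idle();         // ... and this sleep misses it
 */
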
/*
 * The idle thread.  We try to conserve power, while trying to keep
 * overall latency low.  A board- or PM-specific handler can be
 * installed via pm_idle; otherwise default_idle() is used.
 */
void cpu_idle(void)
{
        /* endless idle loop with no priority at all */
        while (1) {
                void (*idle)(void) = pm_idle;

#ifdef CONFIG_HOTPLUG_CPU
                if (cpu_is_offline(smp_processor_id()))
                        cpu_die();
#endif
                if (!idle)
                        idle = default_idle;
                tick_nohz_stop_sched_tick(1);
                while (!need_resched())
                        idle();
                tick_nohz_restart_sched_tick();
                preempt_enable_no_resched();
                schedule();
                preempt_disable();
        }
}
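
/* Example (hypothetical): a power-management driver could install its own
 * idle routine through the exported pm_idle hook, e.g. to enter a deeper
 * sleep state than the plain IDLE instruction; my_pm_idle and
 * enter_deep_sleep below are invented names:
 *
 *      static void my_pm_idle(void)
 *      {
 *              local_irq_disable();
 *              if (!need_resched())
 *                      enter_deep_sleep();
 *              local_irq_enable();
 *      }
 *
 *      pm_idle = my_pm_idle;
 */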

/* Fill in the fpu structure for a core dump.  Blackfin has no hardware
 * FPU, so there is no FPU state to save.
 */
int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpregs)
{
        return 1;
}

/*
 * This gets run with P1 containing the
 * function to call, and R1 containing
 * the "args".  Note P0 is clobbered on the way here.
 */
void kernel_thread_helper(void);
__asm__(".section .text\n"
        ".align 4\n"
        "_kernel_thread_helper:\n"
        "\tsp += -12;\n"
        "\tr0 = r1;\n"
        "\tcall (p1);\n"
        "\tcall _do_exit;\n"
        ".previous");

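/* In C terms the helper amounts to the following (illustrative sketch only;
 * P1 and R1 stand for the registers named in the comment above):
 *
 *      int (*fn)(void *) = P1;
 *      void *arg = R1;
 *      do_exit(fn(arg));
 *
 * The "sp += -12" first reserves what appears to be the Blackfin ABI's
 * 12-byte outgoing-argument area before the calls are made.
 */
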
/*
 * Create a kernel thread.
 */
pid_t kernel_thread(int (*fn) (void *), void *arg, unsigned long flags)
{
        struct pt_regs regs;

        memset(&regs, 0, sizeof(regs));

        regs.r1 = (unsigned long)arg;
        regs.p1 = (unsigned long)fn;
        regs.pc = (unsigned long)kernel_thread_helper;
        regs.orig_p0 = -1;
        /* Set bit 2 to tell ret_from_fork we should be returning to kernel
           mode.  */
        regs.ipend = 0x8002;
        __asm__ __volatile__("%0 = syscfg;":"=da"(regs.syscfg):);
        return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, &regs, 0, NULL,
                       NULL);
}
EXPORT_SYMBOL(kernel_thread);
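
/* Usage sketch (hypothetical caller; my_worker is an invented name), in the
 * style of drivers of this era:
 *
 *      static int my_worker(void *data)
 *      {
 *              daemonize("my_worker");
 *              while (!signal_pending(current))
 *                      schedule();
 *              return 0;
 *      }
 *
 *      pid_t pid = kernel_thread(my_worker, NULL, CLONE_FS | CLONE_FILES);
 */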

void flush_thread(void)
{
}

asmlinkage int bfin_vfork(struct pt_regs *regs)
{
        return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, rdusp(), regs, 0, NULL,
                       NULL);
}

asmlinkage int bfin_clone(struct pt_regs *regs)
{
        unsigned long clone_flags;
        unsigned long newsp;

#ifdef __ARCH_SYNC_CORE_DCACHE
        if (current->rt.nr_cpus_allowed == num_possible_cpus()) {
                current->cpus_allowed = cpumask_of_cpu(smp_processor_id());
                current->rt.nr_cpus_allowed = 1;
        }
#endif

        /* syscall2 puts clone_flags in r0 and usp in r1 */
        clone_flags = regs->r0;
        newsp = regs->r1;
        if (!newsp)
                newsp = rdusp();
        else
                /* Presumably leaves room for the 12 bytes of outgoing-argument
                 * space the Blackfin ABI expects at the top of a stack.  */
                newsp -= 12;
        return do_fork(clone_flags, newsp, regs, 0, NULL, NULL);
}

int
copy_thread(int nr, unsigned long clone_flags,
            unsigned long usp, unsigned long topstk,
            struct task_struct *p, struct pt_regs *regs)
{
        struct pt_regs *childregs;

        childregs = (struct pt_regs *) (task_stack_page(p) + THREAD_SIZE) - 1;
        *childregs = *regs;
        childregs->r0 = 0;

        p->thread.usp = usp;
        p->thread.ksp = (unsigned long)childregs;
        p->thread.pc = (unsigned long)ret_from_fork;

        return 0;
}
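
/* Child kernel stack after copy_thread(), as a sketch:
 *
 *      task_stack_page(p) + THREAD_SIZE -> +------------------+
 *                                          | struct pt_regs   | <- childregs
 *      p->thread.ksp --------------------> +------------------+
 *                                          | (kernel stack    |
 *                                          |  grows down)     |
 *      task_stack_page(p) ---------------> +------------------+
 *
 * ret_from_fork then unwinds childregs, with r0 == 0 so the child sees
 * fork()/clone() return zero.
 */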

/*
 * sys_execve() executes a new program.
 */
asmlinkage int sys_execve(char __user *name, char __user * __user *argv, char __user * __user *envp)
{
        int error;
        char *filename;
        /* Recover struct pt_regs: the system-call entry path apparently
         * leaves it six pointer-sized words above the first argument's
         * stack slot.  */
        struct pt_regs *regs = (struct pt_regs *)((&name) + 6);

        lock_kernel();
        filename = getname(name);
        error = PTR_ERR(filename);
        if (IS_ERR(filename))
                goto out;
        error = do_execve(filename, argv, envp, regs);
        putname(filename);
 out:
        unlock_kernel();
        return error;
}

unsigned long get_wchan(struct task_struct *p)
{
        unsigned long fp, pc;
        unsigned long stack_page;
        int count = 0;
        if (!p || p == current || p->state == TASK_RUNNING)
                return 0;

        stack_page = (unsigned long)p;
        fp = p->thread.usp;
        do {
                /* 8184 looks like THREAD_SIZE (8 KiB) minus the 8-byte frame
                 * link: a valid frame pointer must leave room for the
                 * saved-FP/RETS pair at the top of the stack.  */
                if (fp < stack_page + sizeof(struct thread_info) ||
                    fp >= 8184 + stack_page)
                        return 0;
                pc = ((unsigned long *)fp)[1];
                if (!in_sched_functions(pc))
                        return pc;
                fp = *(unsigned long *)fp;
        } while (count++ < 16);
        return 0;
}
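
/* Assumed stack-frame layout behind the walk above (Blackfin LINK/UNLINK
 * convention):
 *
 *      fp[0] -> caller's frame pointer (next link in the chain)
 *      fp[1] -> return address (RETS saved by LINK)
 *
 * Following fp[0] walks up the call chain, and fp[1] gives the PC to test
 * with in_sched_functions(); the count guard caps the walk at 16 frames in
 * case the chain is corrupt.
 */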

/* Complete a fixed-code atomic sequence that was interrupted in user space.
 * Userspace performs atomic operations by calling tiny code sequences pinned
 * at fixed addresses (see fixed_code.S); if we interrupted a task in the
 * middle of one, we finish the operation on its behalf before it resumes,
 * which is what makes the sequences effectively atomic.
 */
void finish_atomic_sections(struct pt_regs *regs)
{
        int __user *up0 = (int __user *)regs->p0;

        if (regs->pc < ATOMIC_SEQS_START || regs->pc >= ATOMIC_SEQS_END)
                return;

        switch (regs->pc) {
        case ATOMIC_XCHG32 + 2:
                put_user(regs->r1, up0);
                regs->pc += 2;
                break;

        case ATOMIC_CAS32 + 2:
        case ATOMIC_CAS32 + 4:
                if (regs->r0 == regs->r1)
                        put_user(regs->r2, up0);
                regs->pc = ATOMIC_CAS32 + 8;
                break;
        case ATOMIC_CAS32 + 6:
                put_user(regs->r2, up0);
                regs->pc += 2;
                break;

        case ATOMIC_ADD32 + 2:
                regs->r0 = regs->r1 + regs->r0;
                /* fall through */
        case ATOMIC_ADD32 + 4:
                put_user(regs->r0, up0);
                regs->pc = ATOMIC_ADD32 + 6;
                break;

        case ATOMIC_SUB32 + 2:
                regs->r0 = regs->r1 - regs->r0;
                /* fall through */
        case ATOMIC_SUB32 + 4:
                put_user(regs->r0, up0);
                regs->pc = ATOMIC_SUB32 + 6;
                break;

        case ATOMIC_IOR32 + 2:
                regs->r0 = regs->r1 | regs->r0;
                /* fall through */
        case ATOMIC_IOR32 + 4:
                put_user(regs->r0, up0);
                regs->pc = ATOMIC_IOR32 + 6;
                break;

        case ATOMIC_AND32 + 2:
                regs->r0 = regs->r1 & regs->r0;
                /* fall through */
        case ATOMIC_AND32 + 4:
                put_user(regs->r0, up0);
                regs->pc = ATOMIC_AND32 + 6;
                break;

        case ATOMIC_XOR32 + 2:
                regs->r0 = regs->r1 ^ regs->r0;
                /* fall through */
        case ATOMIC_XOR32 + 4:
                put_user(regs->r0, up0);
                regs->pc = ATOMIC_XOR32 + 6;
                break;
        }
}
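
/* For reference, a sketch of the user-visible ATOMIC_XCHG32 sequence this
 * handler completes (mirroring what fixed_code.S is expected to contain;
 * that file is authoritative):
 *
 *      ATOMIC_XCHG32 + 0:      R0 = [P0];      // load old value
 *      ATOMIC_XCHG32 + 2:      [P0] = R1;      // store new value
 *      ATOMIC_XCHG32 + 4:      RTS;
 *
 * If the task was interrupted at +2 (old value loaded, new one not yet
 * stored), the ATOMIC_XCHG32 + 2 case above performs the store with
 * put_user() and advances the PC past it, so userspace observes the whole
 * exchange as atomic.
 */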

#if defined(CONFIG_ACCESS_CHECK)
/* Return 1 if access to memory range is OK, 0 otherwise */
int _access_ok(unsigned long addr, unsigned long size)
{
        if (size == 0)
                return 1;
        /* Reject ranges that wrap around the top of the address space.  */
        if (addr > (addr + size))
                return 0;
        if (segment_eq(get_fs(), KERNEL_DS))
                return 1;
#ifdef CONFIG_MTD_UCLINUX
        if (addr >= memory_start && (addr + size) <= memory_end)
                return 1;
        if (addr >= memory_mtd_end && (addr + size) <= physical_mem_end)
                return 1;

#ifdef CONFIG_ROMFS_MTD_FS
        /* For XIP, allow user space to use pointers within the ROMFS.  */
        if (addr >= memory_mtd_start && (addr + size) <= memory_mtd_end)
                return 1;
#endif
#else
        if (addr >= memory_start && (addr + size) <= physical_mem_end)
                return 1;
#endif
        if (addr >= (unsigned long)__init_begin &&
            addr + size <= (unsigned long)__init_end)
                return 1;
        if (addr >= get_l1_scratch_start()
            && addr + size <= get_l1_scratch_start() + L1_SCRATCH_LENGTH)
                return 1;
#if L1_CODE_LENGTH != 0
        if (addr >= get_l1_code_start() + (_etext_l1 - _stext_l1)
            && addr + size <= get_l1_code_start() + L1_CODE_LENGTH)
                return 1;
#endif
#if L1_DATA_A_LENGTH != 0
        if (addr >= get_l1_data_a_start() + (_ebss_l1 - _sdata_l1)
            && addr + size <= get_l1_data_a_start() + L1_DATA_A_LENGTH)
                return 1;
#endif
#if L1_DATA_B_LENGTH != 0
        if (addr >= get_l1_data_b_start() + (_ebss_b_l1 - _sdata_b_l1)
            && addr + size <= get_l1_data_b_start() + L1_DATA_B_LENGTH)
                return 1;
#endif
#if L2_LENGTH != 0
        if (addr >= L2_START + (_ebss_l2 - _stext_l2)
            && addr + size <= L2_START + L2_LENGTH)
                return 1;
#endif
        return 0;
}
EXPORT_SYMBOL(_access_ok);
#endif /* CONFIG_ACCESS_CHECK */