/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 1999, 2000 by Ralf Baechle and others.
 * Copyright (C) 2005, 2006 by Ralf Baechle (ralf@linux-mips.org)
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Copyright (C) 2004 Thiemo Seufer
 * Copyright (C) 2013 Imagination Technologies Ltd.
 */
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/tick.h>
#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/export.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/personality.h>
#include <linux/sys.h>
#include <linux/init.h>
#include <linux/completion.h>
#include <linux/kallsyms.h>
#include <linux/random.h>
#include <linux/prctl.h>

#include <asm/bootinfo.h>
#include <asm/dsemul.h>
#include <asm/fpu.h>
#include <asm/msa.h>
#include <asm/pgtable.h>
#include <asm/mipsregs.h>
#include <asm/processor.h>
#include <linux/uaccess.h>
#include <asm/inst.h>
#include <asm/isadep.h>
#include <asm/stacktrace.h>
#include <asm/irq_regs.h>
#ifdef CONFIG_HOTPLUG_CPU
void arch_cpu_idle_dead(void)
{
	/* What the heck is this check doing? */
	if (!cpumask_test_cpu(smp_processor_id(), &cpu_callin_map))
		play_dead();
}
#endif
asmlinkage void ret_from_fork(void);
asmlinkage void ret_from_kernel_thread(void);
void start_thread(struct pt_regs *regs, unsigned long pc, unsigned long sp)
{
	unsigned long status;

	/* New thread loses kernel privileges. */
	status = regs->cp0_status & ~(ST0_CU0|ST0_CU1|ST0_FR|KU_MASK);
	status |= KU_USER;
	regs->cp0_status = status;
	clear_thread_flag(TIF_MSA_CTX_LIVE);
	atomic_set(&current->thread.bd_emu_frame, BD_EMUFRAME_NONE);
	regs->cp0_epc = pc;
	regs->regs[29] = sp;
}
void exit_thread(struct task_struct *tsk)
{
	/*
	 * User threads may have allocated a delay slot emulation frame.
	 * If so, clean up that allocation.
	 */
	if (!(current->flags & PF_KTHREAD))
		dsemul_thread_cleanup(tsk);
}
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
	/*
	 * Save any process state which is live in hardware registers to the
	 * parent context prior to duplication. This prevents the new child
	 * state becoming stale if the parent is preempted before copy_thread()
	 * gets a chance to save the parent's live hardware registers to the
	 * child context.
	 */
	preempt_disable();
	if (is_msa_enabled())
		save_msa(current);
	else if (is_fpu_owner())
		_save_fp(current);
	preempt_enable();

	*dst = *src;
	return 0;
}
/*
 * Copy architecture-specific thread state
 */
int copy_thread(unsigned long clone_flags, unsigned long usp,
	unsigned long kthread_arg, struct task_struct *p)
{
	struct thread_info *ti = task_thread_info(p);
	struct pt_regs *childregs, *regs = current_pt_regs();
	unsigned long childksp;

	p->set_child_tid = p->clear_child_tid = NULL;

	childksp = (unsigned long)task_stack_page(p) + THREAD_SIZE - 32;

	/* set up new TSS. */
	childregs = (struct pt_regs *) childksp - 1;
	/* Put the stack after the struct pt_regs. */
	childksp = (unsigned long) childregs;
	p->thread.cp0_status = read_c0_status() & ~(ST0_CU2|ST0_CU1);

	if (unlikely(p->flags & PF_KTHREAD)) {
		/* kernel thread */
		unsigned long status = p->thread.cp0_status;

		memset(childregs, 0, sizeof(struct pt_regs));
		ti->addr_limit = KERNEL_DS;
		p->thread.reg16 = usp; /* fn */
		p->thread.reg17 = kthread_arg;
		p->thread.reg29 = childksp;
		p->thread.reg31 = (unsigned long) ret_from_kernel_thread;
#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
		status = (status & ~(ST0_KUP | ST0_IEP | ST0_IEC)) |
			 ((status & (ST0_KUC | ST0_IEC)) << 2);
#else
		status |= ST0_EXL;
#endif
		childregs->cp0_status = status;
		return 0;
	}

	/* user thread */
	*childregs = *regs;
	childregs->regs[7] = 0;	/* Clear error flag */
	childregs->regs[2] = 0;	/* Child gets zero as return value */
	if (usp)
		childregs->regs[29] = usp;
	ti->addr_limit = USER_DS;

	p->thread.reg29 = (unsigned long) childregs;
	p->thread.reg31 = (unsigned long) ret_from_fork;

	/*
	 * New tasks lose permission to use the fpu. This accelerates context
	 * switching for most programs since they don't use the fpu.
	 */
	childregs->cp0_status &= ~(ST0_CU2|ST0_CU1);

	clear_tsk_thread_flag(p, TIF_USEDFPU);
	clear_tsk_thread_flag(p, TIF_USEDMSA);
	clear_tsk_thread_flag(p, TIF_MSA_CTX_LIVE);

#ifdef CONFIG_MIPS_MT_FPAFF
	clear_tsk_thread_flag(p, TIF_FPUBOUND);
#endif /* CONFIG_MIPS_MT_FPAFF */

	atomic_set(&p->thread.bd_emu_frame, BD_EMUFRAME_NONE);

	if (clone_flags & CLONE_SETTLS)
		ti->tp_value = regs->regs[7];

	return 0;
}
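/*
 * How the kernel-thread handoff above plays out: reg16/reg17 are the
 * callee-saved registers s0/s1, so they survive the first context switch
 * to the new task; ret_from_kernel_thread then moves s1 into a0 and
 * jumps to s0, i.e. it performs the call fn(kthread_arg).
 */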
#ifdef CONFIG_CC_STACKPROTECTOR
#include <linux/stackprotector.h>
unsigned long __stack_chk_guard __read_mostly;
EXPORT_SYMBOL(__stack_chk_guard);
#endif
struct mips_frame_info {
	void		*func;
	unsigned long	func_size;
	int		frame_size;
	int		pc_offset;
};
#define J_TARGET(pc,target) \
		(((unsigned long)(pc) & 0xf0000000) | ((target) << 2))
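/*
 * Worked example (illustrative): a "j" instruction keeps the top four
 * bits of the PC and supplies the next 26 bits via its target field,
 * shifted left by 2.  For pc = 0x80123450 and target = 0x1000:
 *
 *	(0x80123450 & 0xf0000000) | (0x1000 << 2) == 0x80004000
 */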
static inline int is_ra_save_ins(union mips_instruction *ip)
{
#ifdef CONFIG_CPU_MICROMIPS
	/*
	 * swsp ra,offset
	 * swm16 reglist,offset(sp)
	 * swm32 reglist,offset(sp)
	 * sw32 ra,offset(sp)
	 * jraddiusp - NOT SUPPORTED
	 *
	 * microMIPS is way more fun...
	 */
	if (mm_insn_16bit(ip->halfword[1])) {
		return (ip->mm16_r5_format.opcode == mm_swsp16_op &&
			ip->mm16_r5_format.rt == 31) ||
		       (ip->mm16_m_format.opcode == mm_pool16c_op &&
			ip->mm16_m_format.func == mm_swm16_op);
	}

	return (ip->mm_m_format.opcode == mm_pool32b_op &&
		ip->mm_m_format.rd > 9 &&
		ip->mm_m_format.base == 29 &&
		ip->mm_m_format.func == mm_swm32_func) ||
	       (ip->i_format.opcode == mm_sw32_op &&
		ip->i_format.rs == 29 &&
		ip->i_format.rt == 31);
#else
	/* sw / sd $ra, offset($sp) */
	return (ip->i_format.opcode == sw_op || ip->i_format.opcode == sd_op) &&
	       ip->i_format.rs == 29 &&
	       ip->i_format.rt == 31;
#endif
}
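/*
 * Worked example (illustrative): the classic prologue store
 *
 *	sw	ra,28(sp)	# encoded as 0xafbf001c
 *
 * has opcode 0x2b (sw_op), rs = 29 ($sp), rt = 31 ($ra) and an
 * immediate of 28, so is_ra_save_ins() accepts it.
 */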
static inline int is_jump_ins(union mips_instruction *ip)
{
#ifdef CONFIG_CPU_MICROMIPS
	/*
	 * jr16,jrc,jalr16,jalrs16
	 * jal
	 * jalr/jr,jalr.hb/jr.hb,jalrs,jalrs.hb
	 * jraddiusp - NOT SUPPORTED
	 *
	 * microMIPS is kind of more fun...
	 */
	if ((ip->mm16_r5_format.opcode == mm_pool16c_op &&
	    (ip->mm16_r5_format.rt & mm_jr16_op) == mm_jr16_op) ||
	    ip->j_format.opcode == mm_jal32_op)
		return 1;
	if (ip->r_format.opcode != mm_pool32a_op ||
	    ip->r_format.func != mm_pool32axf_op)
		return 0;
	return ((ip->u_format.uimmediate >> 6) & mm_jalr_op) == mm_jalr_op;
#else
	if (ip->j_format.opcode == j_op)
		return 1;
	if (ip->j_format.opcode == jal_op)
		return 1;
	if (ip->r_format.opcode != spec_op)
		return 0;
	return ip->r_format.func == jalr_op || ip->r_format.func == jr_op;
#endif
}
static inline int is_sp_move_ins(union mips_instruction *ip)
{
#ifdef CONFIG_CPU_MICROMIPS
	/*
	 * addiusp -imm
	 * addius5 sp,-imm
	 * addiu32 sp,sp,-imm
	 * jraddiusp - NOT SUPPORTED
	 *
	 * microMIPS is not more fun...
	 */
	if (mm_insn_16bit(ip->halfword[1])) {
		/* Bitwise test of the ADDIUSP function bit, not logical && */
		return (ip->mm16_r3_format.opcode == mm_pool16d_op &&
			(ip->mm16_r3_format.simmediate & mm_addiusp_func)) ||
		       (ip->mm16_r5_format.opcode == mm_pool16d_op &&
			ip->mm16_r5_format.rt == 29);
	}
	return ip->mm_i_format.opcode == mm_addiu32_op &&
	       ip->mm_i_format.rt == 29 && ip->mm_i_format.rs == 29;
#else
	/* addiu/daddiu sp,sp,-imm */
	if (ip->i_format.rs != 29 || ip->i_format.rt != 29)
		return 0;
	if (ip->i_format.opcode == addiu_op || ip->i_format.opcode == daddiu_op)
		return 1;
	return 0;
#endif
}
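/*
 * Worked example (illustrative): the standard classic-MIPS prologue
 *
 *	addiu	sp,sp,-32	# encoded as 0x27bdffe0
 *
 * has opcode 0x9 (addiu_op) with rs = rt = 29 ($sp), so it is accepted,
 * and its immediate (-32) later yields frame_size = 32 in get_frame_info().
 */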
static int get_frame_info(struct mips_frame_info *info)
{
	bool is_mmips = IS_ENABLED(CONFIG_CPU_MICROMIPS);
	union mips_instruction insn, *ip, *ip_end;
	const unsigned int max_insns = 128;
	unsigned int i;

	info->pc_offset = -1;
	info->frame_size = 0;

	ip = (void *)msk_isa16_mode((ulong)info->func);
	if (!ip)
		goto err;

	ip_end = (void *)ip + info->func_size;

	for (i = 0; i < max_insns && ip < ip_end; i++, ip++) {
		if (is_mmips && mm_insn_16bit(ip->halfword[0])) {
			insn.halfword[0] = 0;
			insn.halfword[1] = ip->halfword[0];
		} else if (is_mmips) {
			insn.halfword[0] = ip->halfword[1];
			insn.halfword[1] = ip->halfword[0];
		} else {
			insn.word = ip->word;
		}

		if (is_jump_ins(&insn))
			break;

		if (!info->frame_size) {
			if (is_sp_move_ins(&insn)) {
#ifdef CONFIG_CPU_MICROMIPS
				if (mm_insn_16bit(ip->halfword[0])) {
					unsigned short tmp;

					if (ip->halfword[0] & mm_addiusp_func) {
						tmp = (((ip->halfword[0] >> 1) & 0x1ff) << 2);
						info->frame_size = -(signed short)(tmp | ((tmp & 0x100) ? 0xfe00 : 0));
					} else {
						tmp = (ip->halfword[0] >> 1);
						info->frame_size = -(signed short)(tmp & 0xf);
					}
					ip = (void *) &ip->halfword[1];
					ip--;
				} else
#endif
				info->frame_size = - ip->i_format.simmediate;
			}
			continue;
		}
		if (info->pc_offset == -1 && is_ra_save_ins(&insn)) {
			info->pc_offset =
				ip->i_format.simmediate / sizeof(long);
			break;
		}
	}
	if (info->frame_size && info->pc_offset >= 0) /* nested */
		return 0;
	if (info->pc_offset < 0) /* leaf */
		return 1;
	/* prologue seems bogus... */
err:
	return -1;
}
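/*
 * Worked example (illustrative): scanning a prologue that begins
 *
 *	addiu	sp,sp,-64
 *	sw	ra,60(sp)
 *
 * leaves frame_size = 64 and pc_offset = 60 / sizeof(long) (15 with
 * 32-bit longs), i.e. the index of the saved $ra slot in the frame, and
 * returns 0 (nested).  A function that never saves $ra returns 1 (leaf);
 * an unparseable prologue returns -1.
 */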
static struct mips_frame_info schedule_mfi __read_mostly;

#ifdef CONFIG_KALLSYMS
static unsigned long get___schedule_addr(void)
{
	return kallsyms_lookup_name("__schedule");
}
#else
static unsigned long get___schedule_addr(void)
{
	union mips_instruction *ip = (void *)schedule;
	int i, max_insns = 8;

	for (i = 0; i < max_insns; i++, ip++) {
		if (ip->j_format.opcode == j_op)
			return J_TARGET(ip, ip->j_format.target);
	}
	return 0;
}
#endif
static int __init frame_info_init(void)
{
	unsigned long size = 0;
#ifdef CONFIG_KALLSYMS
	unsigned long ofs;
#endif
	unsigned long addr;

	addr = get___schedule_addr();
	if (!addr)
		addr = (unsigned long)schedule;

#ifdef CONFIG_KALLSYMS
	kallsyms_lookup_size_offset(addr, &size, &ofs);
#endif
	schedule_mfi.func = (void *)addr;
	schedule_mfi.func_size = size;

	get_frame_info(&schedule_mfi);

	/*
	 * Without schedule() frame info, the results given by
	 * thread_saved_pc() and get_wchan() are not reliable.
	 */
	if (schedule_mfi.pc_offset < 0)
		printk("Can't analyze schedule() prologue at %p\n", schedule);

	return 0;
}

arch_initcall(frame_info_init);
/*
 * Return saved PC of a blocked thread.
 */
unsigned long thread_saved_pc(struct task_struct *tsk)
{
	struct thread_struct *t = &tsk->thread;

	/* Newborn processes are a special case */
	if (t->reg31 == (unsigned long) ret_from_fork)
		return t->reg31;
	if (schedule_mfi.pc_offset < 0)
		return 0;
	return ((unsigned long *)t->reg29)[schedule_mfi.pc_offset];
}
#ifdef CONFIG_KALLSYMS
/* generic stack unwinding function */
unsigned long notrace unwind_stack_by_address(unsigned long stack_page,
					      unsigned long *sp,
					      unsigned long pc,
					      unsigned long *ra)
{
	struct mips_frame_info info;
	unsigned long size, ofs;
	int leaf;
	extern void ret_from_irq(void);
	extern void ret_from_exception(void);

	if (!stack_page)
		return 0;

	/*
	 * If we reached the bottom of interrupt context,
	 * return saved pc in pt_regs.
	 */
	if (pc == (unsigned long)ret_from_irq ||
	    pc == (unsigned long)ret_from_exception) {
		struct pt_regs *regs;
		if (*sp >= stack_page &&
		    *sp + sizeof(*regs) <= stack_page + THREAD_SIZE - 32) {
			regs = (struct pt_regs *)*sp;
			pc = regs->cp0_epc;
			if (!user_mode(regs) && __kernel_text_address(pc)) {
				*sp = regs->regs[29];
				*ra = regs->regs[31];
				return pc;
			}
		}
		return 0;
	}
	if (!kallsyms_lookup_size_offset(pc, &size, &ofs))
		return 0;
	/*
	 * Return ra if an exception occurred at the first instruction
	 */
	if (unlikely(ofs == 0)) {
		pc = *ra;
		*ra = 0;
		return pc;
	}
	info.func = (void *)(pc - ofs);
	info.func_size = ofs;	/* analyze from start to ofs */
	leaf = get_frame_info(&info);
	if (leaf < 0)
		return 0;
	if (*sp < stack_page ||
	    *sp + info.frame_size > stack_page + THREAD_SIZE - 32)
		return 0;
	if (leaf)
		/*
		 * In rare cases get_frame_info() may wrongly treat a
		 * nested function as a leaf one.  Return *ra only when
		 * it differs from pc, so the unwind cannot loop forever
		 * on the same address.
		 */
		pc = pc != *ra ? *ra : 0;
	else
		pc = ((unsigned long *)(*sp))[info.pc_offset];
	*sp += info.frame_size;
	*ra = 0;
	return __kernel_text_address(pc) ? pc : 0;
}
EXPORT_SYMBOL(unwind_stack_by_address);
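/*
 * Typical use (a minimal sketch, not part of this file): walk a blocked
 * task's kernel stack frame by frame, printing each return address.
 *
 *	unsigned long stack_page = (unsigned long)task_stack_page(task);
 *	unsigned long sp = task->thread.reg29;
 *	unsigned long ra = 0;
 *	unsigned long pc = thread_saved_pc(task);
 *
 *	do {
 *		printk("%pS\n", (void *)pc);
 *		pc = unwind_stack_by_address(stack_page, &sp, pc, &ra);
 *	} while (pc);
 */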
/* used by show_backtrace() */
unsigned long unwind_stack(struct task_struct *task, unsigned long *sp,
			   unsigned long pc, unsigned long *ra)
{
	unsigned long stack_page = 0;
	int cpu;

	for_each_possible_cpu(cpu) {
		if (on_irq_stack(cpu, *sp)) {
			stack_page = (unsigned long)irq_stack[cpu];
			break;
		}
	}
	if (!stack_page)
		stack_page = (unsigned long)task_stack_page(task);

	return unwind_stack_by_address(stack_page, sp, pc, ra);
}
#endif
/*
 * get_wchan - a maintenance nightmare^W^Wpain in the ass ...
 */
unsigned long get_wchan(struct task_struct *task)
{
	unsigned long pc = 0;
#ifdef CONFIG_KALLSYMS
	unsigned long sp;
	unsigned long ra = 0;
#endif

	if (!task || task == current || task->state == TASK_RUNNING)
		goto out;
	if (!task_stack_page(task))
		goto out;

	pc = thread_saved_pc(task);
#ifdef CONFIG_KALLSYMS
	sp = task->thread.reg29 + schedule_mfi.frame_size;
	while (in_sched_functions(pc))
		pc = unwind_stack(task, &sp, pc, &ra);
#endif
out:
	return pc;
}
/*
 * Don't forget that the stack pointer must be aligned on an 8-byte
 * boundary for 32-bit ABIs and on a 16-byte boundary for 64-bit ABIs.
 */
unsigned long arch_align_stack(unsigned long sp)
{
	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		sp -= get_random_int() & ~PAGE_MASK;

	return sp & ALMASK;
}
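/*
 * Worked example (illustrative): with 4 KiB pages, ~PAGE_MASK is 0xfff,
 * so up to 4095 bytes are subtracted from sp; the final "& ALMASK" then
 * rounds back down to the ABI-required alignment, e.g. on 64-bit
 * (ALMASK = ~0xfUL):
 *
 *	0x7fff8000 - 0x0abc = 0x7fff7544, & ~0xf -> 0x7fff7540
 */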
static void arch_dump_stack(void *info)
{
	struct pt_regs *regs = get_irq_regs();

	if (regs)
		show_regs(regs);
	dump_stack();
}

void arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self)
{
	long this_cpu = get_cpu();

	if (cpumask_test_cpu(this_cpu, mask) && !exclude_self)
		dump_stack();
	smp_call_function_many(mask, arch_dump_stack, NULL, 1);
	put_cpu();
}
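/*
 * Callers (an assumption about typical use, not stated in this file):
 * this is the MIPS backend behind trigger_all_cpu_backtrace() and
 * friends, reached e.g. from the SysRq 'l' handler, so the IPI path
 * above has to remain safe to run from interrupt context.
 */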
int mips_get_process_fp_mode(struct task_struct *task)
{
	int value = 0;

	if (!test_tsk_thread_flag(task, TIF_32BIT_FPREGS))
		value |= PR_FP_MODE_FR;
	if (test_tsk_thread_flag(task, TIF_HYBRID_FPREGS))
		value |= PR_FP_MODE_FRE;

	return value;
}
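/*
 * Userspace sketch (illustrative): the current mode is queried through
 * prctl(2), which routes PR_GET_FP_MODE to the function above.
 *
 *	int mode = prctl(PR_GET_FP_MODE);
 *	if (mode & PR_FP_MODE_FR)
 *		... 64-bit FP registers are in effect ...
 */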
static void prepare_for_fp_mode_switch(void *info)
{
	struct mm_struct *mm = info;

	if (current->mm == mm)
		lose_fpu(1);
}
int mips_set_process_fp_mode(struct task_struct *task, unsigned int value)
{
	const unsigned int known_bits = PR_FP_MODE_FR | PR_FP_MODE_FRE;
	struct task_struct *t;
	int max_users;

	/* Check the value is valid */
	if (value & ~known_bits)
		return -EOPNOTSUPP;

	/* Avoid inadvertently triggering emulation */
	if ((value & PR_FP_MODE_FR) && raw_cpu_has_fpu &&
	    !(raw_current_cpu_data.fpu_id & MIPS_FPIR_F64))
		return -EOPNOTSUPP;
	if ((value & PR_FP_MODE_FRE) && raw_cpu_has_fpu && !cpu_has_fre)
		return -EOPNOTSUPP;

	/* FR = 0 not supported in MIPS R6 */
	if (!(value & PR_FP_MODE_FR) && raw_cpu_has_fpu && cpu_has_mips_r6)
		return -EOPNOTSUPP;

	/* Proceed with the mode switch */
	preempt_disable();

	/* Save FP & vector context, then disable FPU & MSA */
	if (task->signal == current->signal)
		lose_fpu(1);

	/* Prevent any threads from obtaining live FP context */
	atomic_set(&task->mm->context.fp_mode_switching, 1);
	smp_mb__after_atomic();

	/*
	 * If there are multiple online CPUs then force any which are running
	 * threads in this process to lose their FPU context, which they can't
	 * regain until fp_mode_switching is cleared later.
	 */
	if (num_online_cpus() > 1) {
		/* No need to send an IPI for the local CPU */
		max_users = (task->mm == current->mm) ? 1 : 0;

		if (atomic_read(&current->mm->mm_users) > max_users)
			smp_call_function(prepare_for_fp_mode_switch,
					  (void *)current->mm, 1);
	}

	/*
	 * There are now no threads of the process with live FP context, so it
	 * is safe to proceed with the FP mode switch.
	 */
	for_each_thread(task, t) {
		/* Update desired FP register width */
		if (value & PR_FP_MODE_FR) {
			clear_tsk_thread_flag(t, TIF_32BIT_FPREGS);
		} else {
			set_tsk_thread_flag(t, TIF_32BIT_FPREGS);
			clear_tsk_thread_flag(t, TIF_MSA_CTX_LIVE);
		}

		/* Update desired FP single layout */
		if (value & PR_FP_MODE_FRE)
			set_tsk_thread_flag(t, TIF_HYBRID_FPREGS);
		else
			clear_tsk_thread_flag(t, TIF_HYBRID_FPREGS);
	}

	/* Allow threads to use FP again */
	atomic_set(&task->mm->context.fp_mode_switching, 0);
	preempt_enable();

	return 0;
}
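/*
 * Userspace sketch (illustrative): a process switches itself to FR=1
 * through prctl(2), which routes PR_SET_FP_MODE to the function above.
 *
 *	if (prctl(PR_SET_FP_MODE, PR_FP_MODE_FR) != 0)
 *		perror("PR_FP_MODE_FR not supported");
 */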