/*
 * unaligned.c: Unaligned load/store trap handling with special
 *              cases for the kernel to do them more quickly.
 *
 * Copyright (C) 1996,2008 David S. Miller (davem@davemloft.net)
 * Copyright (C) 1996,1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/extable.h>
#include <asm/asi.h>
#include <asm/ptrace.h>
#include <asm/pstate.h>
#include <asm/processor.h>
#include <linux/uaccess.h>
#include <linux/smp.h>
#include <linux/bitops.h>
#include <linux/perf_event.h>
#include <linux/ratelimit.h>
#include <linux/context_tracking.h>
#include <asm/fpumacro.h>
#include <asm/cacheflush.h>
#include <asm/setup.h>

#include "entry.h"
#include "kernel.h"
enum direction {
	load,    /* ld, ldd, ldh, ldsh */
	store,   /* st, std, sth, stsh */
	both,    /* Swap, ldstub, cas, ... */
	fpld,
	fpst,
	invalid,
};
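/* Reference sketch (not in the original source): the decoders below
 * pick apart the SPARC V9 format-3 memory instruction layout, assumed
 * here to be:
 *
 *   bits 31:30  op (3 for loads/stores)
 *   bits 29:25  rd
 *   bits 24:19  op3 (insn bit 21 is op3 bit 2, insn bit 23 is op3
 *               bit 4, the alternate-space bit)
 *   bits 18:14  rs1
 *   bit  13     i (1 = simm13 immediate, 0 = register rs2)
 *   bits 12:5   imm_asi (when i = 0)
 *   bits 4:0    rs2, or the low bits of simm13
 */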
static inline enum direction decode_direction(unsigned int insn)
{
	unsigned long tmp = (insn >> 21) & 1;

	if (!tmp)
		return load;
	switch ((insn>>19)&0xf) {
	case 15: /* swap* */
		return both;
	default:
		return store;
	}
}
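/* Worked example (illustrative): LDX has op3 = 0x0b, so
 * (insn >> 21) & 1 == (0x0b >> 2) & 1 == 0 -> load.  STX has
 * op3 = 0x0e, so the bit is 1, and (insn >> 19) & 0xf == 0xe is not
 * 15 (the swap/ldstub/cas group) -> store.
 */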
/* 16 = double-word, 8 = extra-word, 4 = word, 2 = half-word */
static inline int decode_access_size(struct pt_regs *regs, unsigned int insn)
{
	unsigned int tmp = ((insn >> 19) & 0xf);

	if (tmp == 11 || tmp == 14) /* ldx/stx */
		return 8;
	tmp &= 3;
	if (!tmp)
		return 4;
	if (tmp == 3)
		return 16;	/* ldd/std - Although it is actually 8 */
	if (tmp == 2)
		return 2;
	printk("Impossible unaligned trap. insn=%08x\n", insn);
	die_if_kernel("Byte sized unaligned access?!?!", regs);
	/* GCC should never warn that control reaches the end
	 * of this function without returning a value because
	 * die_if_kernel() is marked with attribute 'noreturn'.
	 * Alas, some versions do...
	 */
	return 0;
}
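/* Worked example (illustrative): LDUH has op3 = 0x02, so tmp = 2, not
 * 11 or 14, and tmp & 3 == 2 -> 2 bytes.  LDD has op3 = 0x03, so
 * tmp & 3 == 3 -> the special "16" encoding that do_int_store()
 * narrows back to an 8-byte access.
 */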
static inline int decode_asi(unsigned int insn, struct pt_regs *regs)
{
	if (!(insn & 0x800000))	/* not an alternate-space access */
		return ASI_P;
	if (insn & 0x2000)
		return (unsigned char)(regs->tstate >> 24);	/* %asi */
	return (unsigned char)(insn >> 5);	/* imm_asi */
}
/* 0x400000 = signed, 0 = unsigned */
static inline int decode_signedness(unsigned int insn)
{
	return (insn & 0x400000);
}
static inline void maybe_flush_windows(unsigned int rs1, unsigned int rs2,
				       unsigned int rd, int from_kernel)
{
	if (rs2 >= 16 || rs1 >= 16 || rd >= 16) {
		if (from_kernel != 0)
			__asm__ __volatile__("flushw");
		else
			flushw_user();
	}
}
static inline long sign_extend_imm13(long imm)
{
	return imm << 51 >> 51;
}
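/* Worked example (illustrative): the simm13 field occupies bits 12:0,
 * so on a 64-bit long, shifting 51 left and then 51 right (arithmetic)
 * discards everything above bit 12 and replicates bit 12 as the sign:
 * a field of 0x1fff yields -1, a field of 0x0fff yields +4095.
 */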
static unsigned long fetch_reg(unsigned int reg, struct pt_regs *regs)
{
	unsigned long value, fp;

	if (reg < 16)
		return (!reg ? 0 : regs->u_regs[reg]);

	fp = regs->u_regs[UREG_FP];

	if (regs->tstate & TSTATE_PRIV) {
		struct reg_window *win;
		win = (struct reg_window *)(fp + STACK_BIAS);
		value = win->locals[reg - 16];
	} else if (!test_thread_64bit_stack(fp)) {
		struct reg_window32 __user *win32;
		win32 = (struct reg_window32 __user *)((unsigned long)((u32)fp));
		get_user(value, &win32->locals[reg - 16]);
	} else {
		struct reg_window __user *win;
		win = (struct reg_window __user *)(fp + STACK_BIAS);
		get_user(value, &win->locals[reg - 16]);
	}
	return value;
}
static unsigned long *fetch_reg_addr(unsigned int reg, struct pt_regs *regs)
{
	unsigned long fp;

	if (reg < 16)
		return &regs->u_regs[reg];

	fp = regs->u_regs[UREG_FP];

	if (regs->tstate & TSTATE_PRIV) {
		struct reg_window *win;
		win = (struct reg_window *)(fp + STACK_BIAS);
		return &win->locals[reg - 16];
	} else if (!test_thread_64bit_stack(fp)) {
		struct reg_window32 *win32;
		win32 = (struct reg_window32 *)((unsigned long)((u32)fp));
		return (unsigned long *)&win32->locals[reg - 16];
	} else {
		struct reg_window *win;
		win = (struct reg_window *)(fp + STACK_BIAS);
		return &win->locals[reg - 16];
	}
}
unsigned long compute_effective_address(struct pt_regs *regs,
					unsigned int insn, unsigned int rd)
{
	int from_kernel = (regs->tstate & TSTATE_PRIV) != 0;
	unsigned int rs1 = (insn >> 14) & 0x1f;
	unsigned int rs2 = insn & 0x1f;
	unsigned long addr;

	if (insn & 0x2000) {
		maybe_flush_windows(rs1, 0, rd, from_kernel);
		addr = (fetch_reg(rs1, regs) + sign_extend_imm13(insn));
	} else {
		maybe_flush_windows(rs1, rs2, rd, from_kernel);
		addr = (fetch_reg(rs1, regs) + fetch_reg(rs2, regs));
	}

	if (!from_kernel && test_thread_flag(TIF_32BIT))
		addr &= 0xffffffff;

	return addr;
}
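/* Worked example (illustrative): for "ldx [%g1 - 8], %o0" the i-bit
 * (insn & 0x2000) is set, so the address is
 * fetch_reg(rs1) + sign_extend_imm13(insn) = %g1 + (-8); with the
 * i-bit clear it would be %g1 + %rs2 instead.
 */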
/* This is just to make gcc think die_if_kernel does return... */
static void __used unaligned_panic(char *str, struct pt_regs *regs)
{
	die_if_kernel(str, regs);
}
extern int do_int_load(unsigned long *dest_reg, int size,
		       unsigned long *saddr, int is_signed, int asi);

extern int __do_int_store(unsigned long *dst_addr, int size,
			  unsigned long src_val, int asi);
static inline int do_int_store(int reg_num, int size, unsigned long *dst_addr,
			       struct pt_regs *regs, int asi, int orig_asi)
{
	unsigned long zero = 0;
	unsigned long *src_val_p = &zero;
	unsigned long src_val;

	if (size == 16) {
		/* std: merge the even/odd register pair into one 64-bit value */
		size = 8;
		zero = (((long)(reg_num ?
		        (unsigned int)fetch_reg(reg_num, regs) : 0)) << 32) |
			(unsigned int)fetch_reg(reg_num + 1, regs);
	} else if (reg_num) {
		src_val_p = fetch_reg_addr(reg_num, regs);
	}
	src_val = *src_val_p;
	if (unlikely(asi != orig_asi)) {
		switch (size) {
		case 2: src_val = swab16(src_val); break;
		case 4: src_val = swab32(src_val); break;
		case 8: src_val = swab64(src_val); break;
		default: BUG(); break;
		}
	}
	return __do_int_store(dst_addr, size, src_val, asi);
}
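/* Illustrative note (not in the original): when the faulting access
 * used a little-endian ASI, kernel_unaligned_trap() strips the 0x08
 * "little" bit so the emulated access itself is big-endian; the
 * swab16/32/64 above then rebuilds the little-endian image, e.g. a
 * 4-byte store of 0x11223344 lands in memory as 0x44332211.
 */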
static inline void advance(struct pt_regs *regs)
{
	regs->tpc   = regs->tnpc;
	regs->tnpc += 4;
	if (test_thread_flag(TIF_32BIT)) {
		regs->tpc &= 0xffffffff;
		regs->tnpc &= 0xffffffff;
	}
}
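/* Background note (summarizing, not from the original): SPARC carries
 * a PC pair because of branch delay slots.  Retiring an emulated
 * instruction therefore moves to tnpc and bumps tnpc by 4, rather
 * than adding 4 to tpc, so emulation inside a delay slot still
 * follows the branch target.
 */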
static inline int floating_point_load_or_store_p(unsigned int insn)
{
	return (insn >> 24) & 1;
}

static inline int ok_for_kernel(unsigned int insn)
{
	return !floating_point_load_or_store_p(insn);
}
static void kernel_mna_trap_fault(int fixup_tstate_asi)
{
	struct pt_regs *regs = current_thread_info()->kern_una_regs;
	unsigned int insn = current_thread_info()->kern_una_insn;
	const struct exception_table_entry *entry;

	entry = search_exception_tables(regs->tpc);
	if (!entry) {
		unsigned long address;

		address = compute_effective_address(regs, insn,
						    ((insn >> 25) & 0x1f));
		if (address < PAGE_SIZE) {
			printk(KERN_ALERT "Unable to handle kernel NULL "
			       "pointer dereference in mna handler");
		} else
			printk(KERN_ALERT "Unable to handle kernel paging "
			       "request in mna handler");
		printk(KERN_ALERT " at virtual address %016lx\n", address);
		printk(KERN_ALERT "current->{active_,}mm->context = %016lx\n",
		       (current->mm ? CTX_HWBITS(current->mm->context) :
			CTX_HWBITS(current->active_mm->context)));
		printk(KERN_ALERT "current->{active_,}mm->pgd = %016lx\n",
		       (current->mm ? (unsigned long) current->mm->pgd :
			(unsigned long) current->active_mm->pgd));
		die_if_kernel("Oops", regs);
		/* Not reached */
	}
	regs->tpc = entry->fixup;
	regs->tnpc = regs->tpc + 4;

	if (fixup_tstate_asi) {
		regs->tstate &= ~TSTATE_ASI;
		regs->tstate |= (ASI_AIUS << 24UL);
	}
}
static void log_unaligned(struct pt_regs *regs)
{
	static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);

	if (__ratelimit(&ratelimit)) {
		printk("Kernel unaligned access at TPC[%lx] %pS\n",
		       regs->tpc, (void *) regs->tpc);
	}
}
asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn)
{
	enum direction dir = decode_direction(insn);
	int size = decode_access_size(regs, insn);
	int orig_asi, asi;

	current_thread_info()->kern_una_regs = regs;
	current_thread_info()->kern_una_insn = insn;

	orig_asi = asi = decode_asi(insn, regs);

	/* If this is a {get,put}_user() on an unaligned userspace pointer,
	 * just signal a fault and do not log the event.
	 */
	if (asi == ASI_AIUS) {
		kernel_mna_trap_fault(0);
		return;
	}

	log_unaligned(regs);

	if (!ok_for_kernel(insn) || dir == both) {
		printk("Unsupported unaligned load/store trap for kernel "
		       "at <%016lx>.\n", regs->tpc);
		unaligned_panic("Kernel does fpu/atomic "
				"unaligned load/store.", regs);
		kernel_mna_trap_fault(0);
	} else {
		unsigned long addr, *reg_addr;
		int err;

		addr = compute_effective_address(regs, insn,
						 ((insn >> 25) & 0x1f));
		perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, addr);
		switch (asi) {
		case ASI_NL: case ASI_AIUPL: case ASI_AIUSL:
		case ASI_PL: case ASI_SL: case ASI_PNFL: case ASI_SNFL:
			/* Do the access big-endian, byte-swap afterwards */
			asi &= ~0x08;
			break;
		}
		switch (dir) {
		case load:
			reg_addr = fetch_reg_addr(((insn>>25)&0x1f), regs);
			err = do_int_load(reg_addr, size,
					  (unsigned long *) addr,
					  decode_signedness(insn), asi);
			if (likely(!err) && unlikely(asi != orig_asi)) {
				unsigned long val_in = *reg_addr;

				switch (size) {
				case 2: val_in = swab16(val_in); break;
				case 4: val_in = swab32(val_in); break;
				case 8: val_in = swab64(val_in); break;
				default: BUG(); break;
				}
				*reg_addr = val_in;
			}
			break;

		case store:
			err = do_int_store(((insn>>25)&0x1f), size,
					   (unsigned long *) addr, regs,
					   asi, orig_asi);
			break;

		default:
			panic("Impossible kernel unaligned trap.");
			/* Not reached... */
		}
		if (unlikely(err))
			kernel_mna_trap_fault(1);
		else
			advance(regs);
	}
}
int handle_popc(u32 insn, struct pt_regs *regs)
{
	int from_kernel = (regs->tstate & TSTATE_PRIV) != 0;
	int ret, rd = ((insn >> 25) & 0x1f);
	u64 value;

	perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0);
	if (insn & 0x2000) {
		maybe_flush_windows(0, 0, rd, from_kernel);
		value = sign_extend_imm13(insn);
	} else {
		maybe_flush_windows(0, insn & 0x1f, rd, from_kernel);
		value = fetch_reg(insn & 0x1f, regs);
	}
	ret = hweight64(value);
	if (rd < 16) {
		if (rd)
			regs->u_regs[rd] = ret;
	} else {
		unsigned long fp = regs->u_regs[UREG_FP];

		if (!test_thread_64bit_stack(fp)) {
			struct reg_window32 __user *win32;
			win32 = (struct reg_window32 __user *)((unsigned long)((u32)fp));
			put_user(ret, &win32->locals[rd - 16]);
		} else {
			struct reg_window __user *win;
			win = (struct reg_window __user *)(fp + STACK_BIAS);
			put_user(ret, &win->locals[rd - 16]);
		}
	}
	advance(regs);
	return 1;
}
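/* Worked example (illustrative): "popc 7, %g1" has the i-bit set, so
 * value = sign_extend_imm13(7) = 7 and hweight64(7) = 3 lands in %g1.
 * A negative immediate is sign-extended first, so "popc -1, %g1"
 * counts all 64 set bits and yields 64.
 */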
extern void do_fpother(struct pt_regs *regs);
extern void do_privact(struct pt_regs *regs);
extern void sun4v_data_access_exception(struct pt_regs *regs,
					unsigned long addr,
					unsigned long type_ctx);
int handle_ldf_stq(u32 insn, struct pt_regs *regs)
{
	unsigned long addr = compute_effective_address(regs, insn, 0);
	int freg;
	struct fpustate *f = FPUSTATE;
	int asi = decode_asi(insn, regs);
	int flag;

	perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0);

	save_and_clear_fpu();
	current_thread_info()->xfsr[0] &= ~0x1c000;
	if (insn & 0x200000) {
		/* STQ */
		u64 first = 0, second = 0;

		freg = ((insn >> 25) & 0x1e) | ((insn >> 20) & 0x20);
		flag = (freg < 32) ? FPRS_DL : FPRS_DU;
		if (freg & 3) {
			current_thread_info()->xfsr[0] |= (6 << 14) /* invalid_fp_register */;
			do_fpother(regs);
			return 0;
		}
		if (current_thread_info()->fpsaved[0] & flag) {
			first = *(u64 *)&f->regs[freg];
			second = *(u64 *)&f->regs[freg+2];
		}
		if (asi < 0x80) {
			do_privact(regs);
			return 1;
		}
		switch (asi) {
		case ASI_P:
		case ASI_S: break;
		case ASI_PL:
		case ASI_SL:
			{
				/* Need to convert endians */
				u64 tmp = __swab64p(&first);

				first = __swab64p(&second);
				second = tmp;
				break;
			}
		default:
			if (tlb_type == hypervisor)
				sun4v_data_access_exception(regs, addr, 0);
			else
				spitfire_data_access_exception(regs, 0, addr);
			return 1;
		}
		if (put_user (first >> 32, (u32 __user *)addr) ||
		    __put_user ((u32)first, (u32 __user *)(addr + 4)) ||
		    __put_user (second >> 32, (u32 __user *)(addr + 8)) ||
		    __put_user ((u32)second, (u32 __user *)(addr + 12))) {
			if (tlb_type == hypervisor)
				sun4v_data_access_exception(regs, addr, 0);
			else
				spitfire_data_access_exception(regs, 0, addr);
			return 1;
		}
	} else {
		/* LDF, LDDF, LDQF */
		u32 data[4] __attribute__ ((aligned(8)));
		int size, i;
		int err;

		if (asi < 0x80) {
			do_privact(regs);
			return 1;
		} else if (asi > ASI_SNFL) {
			if (tlb_type == hypervisor)
				sun4v_data_access_exception(regs, addr, 0);
			else
				spitfire_data_access_exception(regs, 0, addr);
			return 1;
		}
		switch (insn & 0x180000) {
		case 0x000000: size = 1; break;
		case 0x100000: size = 4; break;
		default: size = 2; break;
		}
		if (size == 1)
			freg = (insn >> 25) & 0x1f;
		else
			freg = ((insn >> 25) & 0x1e) | ((insn >> 20) & 0x20);
		flag = (freg < 32) ? FPRS_DL : FPRS_DU;

		for (i = 0; i < size; i++)
			data[i] = 0;

		err = get_user (data[0], (u32 __user *) addr);
		if (!err) {
			for (i = 1; i < size; i++)
				err |= __get_user (data[i], (u32 __user *)(addr + 4*i));
		}
		if (err && !(asi & 0x2 /* NF */)) {
			if (tlb_type == hypervisor)
				sun4v_data_access_exception(regs, addr, 0);
			else
				spitfire_data_access_exception(regs, 0, addr);
			return 1;
		}
		if (asi & 0x8) /* Little */ {
			u64 tmp;

			switch (size) {
			case 1: data[0] = le32_to_cpup(data + 0); break;
			case 4: tmp = le64_to_cpup((u64 *)(data + 0));
				*(u64 *)(data + 0) = le64_to_cpup((u64 *)(data + 2));
				*(u64 *)(data + 2) = tmp;
				break;
			default: *(u64 *)(data + 0) = le64_to_cpup((u64 *)(data + 0));
				break;
			}
		}
		if (!(current_thread_info()->fpsaved[0] & FPRS_FEF)) {
			current_thread_info()->fpsaved[0] = FPRS_FEF;
			current_thread_info()->gsr[0] = 0;
		}
		if (!(current_thread_info()->fpsaved[0] & flag)) {
			if (freg < 32)
				memset(f->regs, 0, 32*sizeof(u32));
			else
				memset(f->regs+32, 0, 32*sizeof(u32));
		}
		memcpy(f->regs + freg, data, size * 4);
		current_thread_info()->fpsaved[0] |= flag;
	}
	advance(regs);
	return 1;
}
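/* Worked example (illustrative): the 6-bit double/quad FP register
 * number is rebuilt from the 5-bit rd field per SPARC V9 by moving
 * bit 0 of rd up to bit 5.  An rd field of 0b00011 gives
 * ((3 & 0x1e) | (1 << 5)) = 34, i.e. %f34.
 */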
void handle_ld_nf(u32 insn, struct pt_regs *regs)
{
	int rd = ((insn >> 25) & 0x1f);
	int from_kernel = (regs->tstate & TSTATE_PRIV) != 0;
	unsigned long *reg;

	perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0);

	maybe_flush_windows(0, 0, rd, from_kernel);
	reg = fetch_reg_addr(rd, regs);
	if (from_kernel || rd < 16) {
		reg[0] = 0;
		if ((insn & 0x780000) == 0x180000)
			reg[1] = 0;
	} else if (!test_thread_64bit_stack(regs->u_regs[UREG_FP])) {
		put_user(0, (int __user *) reg);
		if ((insn & 0x780000) == 0x180000)
			put_user(0, ((int __user *) reg) + 1);
	} else {
		put_user(0, (unsigned long __user *) reg);
		if ((insn & 0x780000) == 0x180000)
			put_user(0, (unsigned long __user *) reg + 1);
	}
	advance(regs);
}
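/* Illustrative note: (insn & 0x780000) == 0x180000 tests whether the
 * low four op3 bits equal 0x3, i.e. the ldd flavors, which is why a
 * second register (or a second word on a 32-bit stack) gets zeroed as
 * well.
 */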
void handle_lddfmna(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr)
{
	enum ctx_state prev_state = exception_enter();
	unsigned long pc = regs->tpc;
	unsigned long tstate = regs->tstate;
	u32 insn;
	u64 value;
	u8 freg;
	int flag;
	struct fpustate *f = FPUSTATE;

	if (tstate & TSTATE_PRIV)
		die_if_kernel("lddfmna from kernel", regs);
	perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, sfar);
	if (test_thread_flag(TIF_32BIT))
		pc = (u32)pc;
	if (get_user(insn, (u32 __user *) pc) != -EFAULT) {
		int asi = decode_asi(insn, regs);
		u32 first, second;
		int err;

		if ((asi > ASI_SNFL) ||
		    (asi < ASI_P))
			goto daex;
		first = second = 0;
		err = get_user(first, (u32 __user *)sfar);
		if (!err)
			err = get_user(second, (u32 __user *)(sfar + 4));
		if (err) {
			if (!(asi & 0x2))
				goto daex;
			first = second = 0;
		}
		save_and_clear_fpu();
		freg = ((insn >> 25) & 0x1e) | ((insn >> 20) & 0x20);
		value = (((u64)first) << 32) | second;
		if (asi & 0x8) /* Little */
			value = __swab64p(&value);
		flag = (freg < 32) ? FPRS_DL : FPRS_DU;
		if (!(current_thread_info()->fpsaved[0] & FPRS_FEF)) {
			current_thread_info()->fpsaved[0] = FPRS_FEF;
			current_thread_info()->gsr[0] = 0;
		}
		if (!(current_thread_info()->fpsaved[0] & flag)) {
			if (freg < 32)
				memset(f->regs, 0, 32*sizeof(u32));
			else
				memset(f->regs+32, 0, 32*sizeof(u32));
		}
		*(u64 *)(f->regs + freg) = value;
		current_thread_info()->fpsaved[0] |= flag;
	} else {
daex:
		if (tlb_type == hypervisor)
			sun4v_data_access_exception(regs, sfar, sfsr);
		else
			spitfire_data_access_exception(regs, sfsr, sfar);
		goto out;
	}
	advance(regs);
out:
	exception_exit(prev_state);
}
void handle_stdfmna(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr)
{
	enum ctx_state prev_state = exception_enter();
	unsigned long pc = regs->tpc;
	unsigned long tstate = regs->tstate;
	u32 insn;
	u64 value;
	u8 freg;
	int flag;
	struct fpustate *f = FPUSTATE;

	if (tstate & TSTATE_PRIV)
		die_if_kernel("stdfmna from kernel", regs);
	perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, sfar);
	if (test_thread_flag(TIF_32BIT))
		pc = (u32)pc;
	if (get_user(insn, (u32 __user *) pc) != -EFAULT) {
		int asi = decode_asi(insn, regs);

		freg = ((insn >> 25) & 0x1e) | ((insn >> 20) & 0x20);
		value = 0;
		flag = (freg < 32) ? FPRS_DL : FPRS_DU;
		if ((asi > ASI_SNFL) ||
		    (asi < ASI_P))
			goto daex;
		save_and_clear_fpu();
		if (current_thread_info()->fpsaved[0] & flag)
			value = *(u64 *)&f->regs[freg];
		switch (asi) {
		case ASI_P:
		case ASI_S: break;
		case ASI_PL:
		case ASI_SL:
			value = __swab64p(&value); break;
		default: goto daex;
		}
		if (put_user (value >> 32, (u32 __user *) sfar) ||
		    __put_user ((u32)value, (u32 __user *)(sfar + 4)))
			goto daex;
	} else {
daex:
		if (tlb_type == hypervisor)
			sun4v_data_access_exception(regs, sfar, sfsr);
		else
			spitfire_data_access_exception(regs, sfsr, sfar);
		goto out;
	}
	advance(regs);
out:
	exception_exit(prev_state);
}