/*
 * Copyright (C) 2013 Huawei Ltd.
 * Author: Jiang Liu <liuj97@gmail.com>
 *
 * Copyright (C) 2014-2016 Zi Shen Lim <zlim.lnx@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/stop_machine.h>
#include <linux/types.h>
#include <linux/uaccess.h>

#include <asm/cacheflush.h>
#include <asm/debug-monitors.h>
#include <asm/fixmap.h>
#include <asm/insn.h>
#define AARCH64_INSN_SF_BIT	BIT(31)
#define AARCH64_INSN_N_BIT	BIT(22)

/* Encoding class is selected by the op0 field, bits [28:25] of the insn. */
static int aarch64_insn_encoding_class[] = {
	AARCH64_INSN_CLS_UNKNOWN,
	AARCH64_INSN_CLS_UNKNOWN,
	AARCH64_INSN_CLS_UNKNOWN,
	AARCH64_INSN_CLS_UNKNOWN,
	AARCH64_INSN_CLS_LDST,
	AARCH64_INSN_CLS_DP_REG,
	AARCH64_INSN_CLS_LDST,
	AARCH64_INSN_CLS_DP_FPSIMD,
	AARCH64_INSN_CLS_DP_IMM,
	AARCH64_INSN_CLS_DP_IMM,
	AARCH64_INSN_CLS_BR_SYS,
	AARCH64_INSN_CLS_BR_SYS,
	AARCH64_INSN_CLS_LDST,
	AARCH64_INSN_CLS_DP_REG,
	AARCH64_INSN_CLS_LDST,
	AARCH64_INSN_CLS_DP_FPSIMD,
};

enum aarch64_insn_encoding_class __kprobes aarch64_get_insn_class(u32 insn)
{
	return aarch64_insn_encoding_class[(insn >> 25) & 0xf];
}
/* NOP is an alias of HINT */
bool __kprobes aarch64_insn_is_nop(u32 insn)
{
	if (!aarch64_insn_is_hint(insn))
		return false;

	switch (insn & 0xFE0) {
	case AARCH64_INSN_HINT_YIELD:
	case AARCH64_INSN_HINT_WFE:
	case AARCH64_INSN_HINT_WFI:
	case AARCH64_INSN_HINT_SEV:
	case AARCH64_INSN_HINT_SEVL:
		return false;
	default:
		return true;
	}
}

bool aarch64_insn_is_branch_imm(u32 insn)
{
	return (aarch64_insn_is_b(insn) || aarch64_insn_is_bl(insn) ||
		aarch64_insn_is_tbz(insn) || aarch64_insn_is_tbnz(insn) ||
		aarch64_insn_is_cbz(insn) || aarch64_insn_is_cbnz(insn) ||
		aarch64_insn_is_bcond(insn));
}
static DEFINE_RAW_SPINLOCK(patch_lock);

static void __kprobes *patch_map(void *addr, int fixmap)
{
	unsigned long uintaddr = (uintptr_t) addr;
	bool module = !core_kernel_text(uintaddr);
	struct page *page;

	if (module && IS_ENABLED(CONFIG_DEBUG_SET_MODULE_RONX))
		page = vmalloc_to_page(addr);
	else if (!module && IS_ENABLED(CONFIG_DEBUG_RODATA))
		page = pfn_to_page(PHYS_PFN(__pa(addr)));
	else
		return addr;

	BUG_ON(!page);
	return (void *)set_fixmap_offset(fixmap, page_to_phys(page) +
			(uintaddr & ~PAGE_MASK));
}

static void __kprobes patch_unmap(int fixmap)
{
	clear_fixmap(fixmap);
}
/*
 * In ARMv8-A, A64 instructions have a fixed length of 32 bits and are always
 * little-endian.
 */
int __kprobes aarch64_insn_read(void *addr, u32 *insnp)
{
	int ret;
	u32 val;

	ret = probe_kernel_read(&val, addr, AARCH64_INSN_SIZE);
	if (!ret)
		*insnp = le32_to_cpu(val);

	return ret;
}

static int __kprobes __aarch64_insn_write(void *addr, u32 insn)
{
	void *waddr = addr;
	unsigned long flags = 0;
	int ret;

	raw_spin_lock_irqsave(&patch_lock, flags);
	waddr = patch_map(addr, FIX_TEXT_POKE0);

	ret = probe_kernel_write(waddr, &insn, AARCH64_INSN_SIZE);

	patch_unmap(FIX_TEXT_POKE0);
	raw_spin_unlock_irqrestore(&patch_lock, flags);

	return ret;
}

int __kprobes aarch64_insn_write(void *addr, u32 insn)
{
	insn = cpu_to_le32(insn);
	return __aarch64_insn_write(addr, insn);
}
static bool __kprobes __aarch64_insn_hotpatch_safe(u32 insn)
{
	if (aarch64_get_insn_class(insn) != AARCH64_INSN_CLS_BR_SYS)
		return false;

	return aarch64_insn_is_b(insn) ||
		aarch64_insn_is_bl(insn) ||
		aarch64_insn_is_svc(insn) ||
		aarch64_insn_is_hvc(insn) ||
		aarch64_insn_is_smc(insn) ||
		aarch64_insn_is_brk(insn) ||
		aarch64_insn_is_nop(insn);
}

bool __kprobes aarch64_insn_uses_literal(u32 insn)
{
	/* ldr/ldrsw (literal), prfm */

	return aarch64_insn_is_ldr_lit(insn) ||
		aarch64_insn_is_ldrsw_lit(insn) ||
		aarch64_insn_is_adr_adrp(insn) ||
		aarch64_insn_is_prfm_lit(insn);
}

bool __kprobes aarch64_insn_is_branch(u32 insn)
{
	/* b, bl, cb*, tb*, b.cond, br, blr, ret */

	return aarch64_insn_is_b(insn) ||
		aarch64_insn_is_bl(insn) ||
		aarch64_insn_is_cbz(insn) ||
		aarch64_insn_is_cbnz(insn) ||
		aarch64_insn_is_tbz(insn) ||
		aarch64_insn_is_tbnz(insn) ||
		aarch64_insn_is_ret(insn) ||
		aarch64_insn_is_br(insn) ||
		aarch64_insn_is_blr(insn) ||
		aarch64_insn_is_bcond(insn);
}
/*
 * ARM Architecture Reference Manual for ARMv8 Profile-A, Issue A.a
 * Section B2.6.5 "Concurrent modification and execution of instructions":
 * Concurrent modification and execution of instructions can lead to the
 * resulting instruction performing any behavior that can be achieved by
 * executing any sequence of instructions that can be executed from the
 * same Exception level, except where the instruction before modification
 * and the instruction after modification is a B, BL, NOP, BKPT, SVC, HVC,
 * or SMC instruction.
 */
bool __kprobes aarch64_insn_hotpatch_safe(u32 old_insn, u32 new_insn)
{
	return __aarch64_insn_hotpatch_safe(old_insn) &&
	       __aarch64_insn_hotpatch_safe(new_insn);
}

int __kprobes aarch64_insn_patch_text_nosync(void *addr, u32 insn)
{
	u32 *tp = addr;
	int ret;

	/* A64 instructions must be word aligned */
	if ((uintptr_t)tp & 0x3)
		return -EINVAL;

	ret = aarch64_insn_write(tp, insn);
	if (ret == 0)
		flush_icache_range((uintptr_t)tp,
				   (uintptr_t)tp + AARCH64_INSN_SIZE);

	return ret;
}
struct aarch64_insn_patch {
	void		**text_addrs;
	u32		*new_insns;
	int		insn_cnt;
	atomic_t	cpu_count;
};

static int __kprobes aarch64_insn_patch_text_cb(void *arg)
{
	int i, ret = 0;
	struct aarch64_insn_patch *pp = arg;

	/* The first CPU becomes master */
	if (atomic_inc_return(&pp->cpu_count) == 1) {
		for (i = 0; ret == 0 && i < pp->insn_cnt; i++)
			ret = aarch64_insn_patch_text_nosync(pp->text_addrs[i],
							     pp->new_insns[i]);
		/*
		 * aarch64_insn_patch_text_nosync() calls flush_icache_range(),
		 * which ends with "dsb; isb" pair guaranteeing global
		 * visibility.
		 */
		/* Notify other processors with an additional increment. */
		atomic_inc(&pp->cpu_count);
	} else {
		while (atomic_read(&pp->cpu_count) <= num_online_cpus())
			cpu_relax();
		isb();
	}

	return ret;
}

int __kprobes aarch64_insn_patch_text_sync(void *addrs[], u32 insns[], int cnt)
{
	struct aarch64_insn_patch patch = {
		.text_addrs = addrs,
		.new_insns = insns,
		.insn_cnt = cnt,
		.cpu_count = ATOMIC_INIT(0),
	};

	if (cnt <= 0)
		return -EINVAL;

	return stop_machine(aarch64_insn_patch_text_cb, &patch,
			    cpu_online_mask);
}

int __kprobes aarch64_insn_patch_text(void *addrs[], u32 insns[], int cnt)
{
	int ret;
	u32 insn;

	/* Unsafe to patch multiple instructions without synchronization */
	if (cnt == 1) {
		ret = aarch64_insn_read(addrs[0], &insn);
		if (ret)
			return ret;

		if (aarch64_insn_hotpatch_safe(insn, insns[0])) {
			/*
			 * ARMv8 architecture doesn't guarantee all CPUs see
			 * the new instruction after returning from function
			 * aarch64_insn_patch_text_nosync(). So send IPIs to
			 * all other CPUs to achieve instruction
			 * synchronization.
			 */
			ret = aarch64_insn_patch_text_nosync(addrs[0], insns[0]);
			kick_all_cpus_sync();
			return ret;
		}
	}

	return aarch64_insn_patch_text_sync(addrs, insns, cnt);
}
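
/*
 * A minimal usage sketch (hypothetical caller, not a user of this file):
 * replace the single instruction at @addr with a NOP.  Because both the
 * old and the new instruction are hotpatch-safe, the single-instruction
 * fast path above avoids stop_machine() and only sends IPIs.
 */
static int __maybe_unused example_nop_out(void *addr)
{
	u32 nop = aarch64_insn_gen_nop();

	return aarch64_insn_patch_text(&addr, &nop, 1);
}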
static int __kprobes aarch64_get_imm_shift_mask(enum aarch64_insn_imm_type type,
						u32 *maskp, int *shiftp)
{
	u32 mask;
	int shift;

	switch (type) {
	case AARCH64_INSN_IMM_26:
		mask = BIT(26) - 1;
		shift = 0;
		break;
	case AARCH64_INSN_IMM_19:
		mask = BIT(19) - 1;
		shift = 5;
		break;
	case AARCH64_INSN_IMM_16:
		mask = BIT(16) - 1;
		shift = 5;
		break;
	case AARCH64_INSN_IMM_14:
		mask = BIT(14) - 1;
		shift = 5;
		break;
	case AARCH64_INSN_IMM_12:
		mask = BIT(12) - 1;
		shift = 10;
		break;
	case AARCH64_INSN_IMM_9:
		mask = BIT(9) - 1;
		shift = 12;
		break;
	case AARCH64_INSN_IMM_7:
		mask = BIT(7) - 1;
		shift = 15;
		break;
	case AARCH64_INSN_IMM_6:
	case AARCH64_INSN_IMM_S:
		mask = BIT(6) - 1;
		shift = 10;
		break;
	case AARCH64_INSN_IMM_R:
		mask = BIT(6) - 1;
		shift = 16;
		break;
	default:
		return -EINVAL;
	}

	*maskp = mask;
	*shiftp = shift;

	return 0;
}
#define ADR_IMM_HILOSPLIT	2
#define ADR_IMM_SIZE		SZ_2M
#define ADR_IMM_LOMASK		((1 << ADR_IMM_HILOSPLIT) - 1)
#define ADR_IMM_HIMASK		((ADR_IMM_SIZE >> ADR_IMM_HILOSPLIT) - 1)
#define ADR_IMM_LOSHIFT		29
#define ADR_IMM_HISHIFT		5

u64 aarch64_insn_decode_immediate(enum aarch64_insn_imm_type type, u32 insn)
{
	u32 immlo, immhi, mask;
	int shift;

	switch (type) {
	case AARCH64_INSN_IMM_ADR:
		shift = 0;
		immlo = (insn >> ADR_IMM_LOSHIFT) & ADR_IMM_LOMASK;
		immhi = (insn >> ADR_IMM_HISHIFT) & ADR_IMM_HIMASK;
		insn = (immhi << ADR_IMM_HILOSPLIT) | immlo;
		mask = ADR_IMM_SIZE - 1;
		break;
	default:
		if (aarch64_get_imm_shift_mask(type, &mask, &shift) < 0) {
			pr_err("aarch64_insn_decode_immediate: unknown immediate encoding %d\n",
			       type);
			return 0;
		}
	}

	return (insn >> shift) & mask;
}

u32 __kprobes aarch64_insn_encode_immediate(enum aarch64_insn_imm_type type,
					    u32 insn, u64 imm)
{
	u32 immlo, immhi, mask;
	int shift;

	if (insn == AARCH64_BREAK_FAULT)
		return AARCH64_BREAK_FAULT;

	switch (type) {
	case AARCH64_INSN_IMM_ADR:
		shift = 0;
		immlo = (imm & ADR_IMM_LOMASK) << ADR_IMM_LOSHIFT;
		imm >>= ADR_IMM_HILOSPLIT;
		immhi = (imm & ADR_IMM_HIMASK) << ADR_IMM_HISHIFT;
		imm = immlo | immhi;
		mask = ((ADR_IMM_LOMASK << ADR_IMM_LOSHIFT) |
			(ADR_IMM_HIMASK << ADR_IMM_HISHIFT));
		break;
	default:
		if (aarch64_get_imm_shift_mask(type, &mask, &shift) < 0) {
			pr_err("aarch64_insn_encode_immediate: unknown immediate encoding %d\n",
			       type);
			return AARCH64_BREAK_FAULT;
		}
	}

	/* Update the immediate field. */
	insn &= ~(mask << shift);
	insn |= (imm & mask) << shift;

	return insn;
}
static u32 aarch64_insn_encode_register(enum aarch64_insn_register_type type,
					u32 insn,
					enum aarch64_insn_register reg)
{
	int shift;

	if (insn == AARCH64_BREAK_FAULT)
		return AARCH64_BREAK_FAULT;

	if (reg < AARCH64_INSN_REG_0 || reg > AARCH64_INSN_REG_SP) {
		pr_err("%s: unknown register encoding %d\n", __func__, reg);
		return AARCH64_BREAK_FAULT;
	}

	switch (type) {
	case AARCH64_INSN_REGTYPE_RT:
	case AARCH64_INSN_REGTYPE_RD:
		shift = 0;
		break;
	case AARCH64_INSN_REGTYPE_RN:
		shift = 5;
		break;
	case AARCH64_INSN_REGTYPE_RT2:
	case AARCH64_INSN_REGTYPE_RA:
		shift = 10;
		break;
	case AARCH64_INSN_REGTYPE_RM:
		shift = 16;
		break;
	default:
		pr_err("%s: unknown register type encoding %d\n", __func__,
		       type);
		return AARCH64_BREAK_FAULT;
	}

	insn &= ~(GENMASK(4, 0) << shift);
	insn |= reg << shift;

	return insn;
}

static u32 aarch64_insn_encode_ldst_size(enum aarch64_insn_size_type type,
					 u32 insn)
{
	u32 size;

	switch (type) {
	case AARCH64_INSN_SIZE_8:
		size = 0;
		break;
	case AARCH64_INSN_SIZE_16:
		size = 1;
		break;
	case AARCH64_INSN_SIZE_32:
		size = 2;
		break;
	case AARCH64_INSN_SIZE_64:
		size = 3;
		break;
	default:
		pr_err("%s: unknown size encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	insn &= ~GENMASK(31, 30);
	insn |= size << 30;

	return insn;
}

static inline long branch_imm_common(unsigned long pc, unsigned long addr,
				     long range)
{
	long offset;

	if ((pc & 0x3) || (addr & 0x3)) {
		pr_err("%s: A64 instructions must be word aligned\n", __func__);
		return range;
	}

	offset = ((long)addr - (long)pc);

	if (offset < -range || offset >= range) {
		pr_err("%s: offset out of range\n", __func__);
		return range;
	}

	return offset;
}
u32 __kprobes aarch64_insn_gen_branch_imm(unsigned long pc, unsigned long addr,
					  enum aarch64_insn_branch_type type)
{
	u32 insn;
	long offset;

	/*
	 * B/BL support [-128M, 128M) offset
	 * ARM64 virtual address arrangement guarantees all kernel and module
	 * texts are within +/-128M.
	 */
	offset = branch_imm_common(pc, addr, SZ_128M);
	if (offset >= SZ_128M)
		return AARCH64_BREAK_FAULT;

	switch (type) {
	case AARCH64_INSN_BRANCH_LINK:
		insn = aarch64_insn_get_bl_value();
		break;
	case AARCH64_INSN_BRANCH_NOLINK:
		insn = aarch64_insn_get_b_value();
		break;
	default:
		pr_err("%s: unknown branch encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_26, insn,
					     offset >> 2);
}
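
/*
 * Illustrative sketch (hypothetical helper): build a BL from a call site to
 * a target function, the way an ftrace-style caller would.  Both addresses
 * must be word aligned and within +/-128M of each other, otherwise
 * AARCH64_BREAK_FAULT comes back.
 */
static u32 __maybe_unused example_gen_call(unsigned long pc, unsigned long func)
{
	return aarch64_insn_gen_branch_imm(pc, func, AARCH64_INSN_BRANCH_LINK);
}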
u32 aarch64_insn_gen_comp_branch_imm(unsigned long pc, unsigned long addr,
				     enum aarch64_insn_register reg,
				     enum aarch64_insn_variant variant,
				     enum aarch64_insn_branch_type type)
{
	u32 insn;
	long offset;

	offset = branch_imm_common(pc, addr, SZ_1M);
	if (offset >= SZ_1M)
		return AARCH64_BREAK_FAULT;

	switch (type) {
	case AARCH64_INSN_BRANCH_COMP_ZERO:
		insn = aarch64_insn_get_cbz_value();
		break;
	case AARCH64_INSN_BRANCH_COMP_NONZERO:
		insn = aarch64_insn_get_cbnz_value();
		break;
	default:
		pr_err("%s: unknown branch encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn, reg);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_19, insn,
					     offset >> 2);
}

u32 aarch64_insn_gen_cond_branch_imm(unsigned long pc, unsigned long addr,
				     enum aarch64_insn_condition cond)
{
	u32 insn;
	long offset;

	offset = branch_imm_common(pc, addr, SZ_1M);

	insn = aarch64_insn_get_bcond_value();

	if (cond < AARCH64_INSN_COND_EQ || cond > AARCH64_INSN_COND_AL) {
		pr_err("%s: unknown condition encoding %d\n", __func__, cond);
		return AARCH64_BREAK_FAULT;
	}
	insn |= cond;

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_19, insn,
					     offset >> 2);
}

u32 __kprobes aarch64_insn_gen_hint(enum aarch64_insn_hint_op op)
{
	return aarch64_insn_get_hint_value() | op;
}

u32 __kprobes aarch64_insn_gen_nop(void)
{
	return aarch64_insn_gen_hint(AARCH64_INSN_HINT_NOP);
}

u32 aarch64_insn_gen_branch_reg(enum aarch64_insn_register reg,
				enum aarch64_insn_branch_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_BRANCH_NOLINK:
		insn = aarch64_insn_get_br_value();
		break;
	case AARCH64_INSN_BRANCH_LINK:
		insn = aarch64_insn_get_blr_value();
		break;
	case AARCH64_INSN_BRANCH_RETURN:
		insn = aarch64_insn_get_ret_value();
		break;
	default:
		pr_err("%s: unknown branch encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, reg);
}
u32 aarch64_insn_gen_load_store_reg(enum aarch64_insn_register reg,
				    enum aarch64_insn_register base,
				    enum aarch64_insn_register offset,
				    enum aarch64_insn_size_type size,
				    enum aarch64_insn_ldst_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_LDST_LOAD_REG_OFFSET:
		insn = aarch64_insn_get_ldr_reg_value();
		break;
	case AARCH64_INSN_LDST_STORE_REG_OFFSET:
		insn = aarch64_insn_get_str_reg_value();
		break;
	default:
		pr_err("%s: unknown load/store encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_ldst_size(size, insn);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn, reg);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
					    base);

	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn,
					    offset);
}

u32 aarch64_insn_gen_load_store_pair(enum aarch64_insn_register reg1,
				     enum aarch64_insn_register reg2,
				     enum aarch64_insn_register base,
				     int offset,
				     enum aarch64_insn_variant variant,
				     enum aarch64_insn_ldst_type type)
{
	u32 insn;
	int shift;

	switch (type) {
	case AARCH64_INSN_LDST_LOAD_PAIR_PRE_INDEX:
		insn = aarch64_insn_get_ldp_pre_value();
		break;
	case AARCH64_INSN_LDST_STORE_PAIR_PRE_INDEX:
		insn = aarch64_insn_get_stp_pre_value();
		break;
	case AARCH64_INSN_LDST_LOAD_PAIR_POST_INDEX:
		insn = aarch64_insn_get_ldp_post_value();
		break;
	case AARCH64_INSN_LDST_STORE_PAIR_POST_INDEX:
		insn = aarch64_insn_get_stp_post_value();
		break;
	default:
		pr_err("%s: unknown load/store encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		if ((offset & 0x3) || (offset < -256) || (offset > 252)) {
			pr_err("%s: offset must be multiples of 4 in the range of [-256, 252] %d\n",
			       __func__, offset);
			return AARCH64_BREAK_FAULT;
		}
		shift = 2;
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		if ((offset & 0x7) || (offset < -512) || (offset > 504)) {
			pr_err("%s: offset must be multiples of 8 in the range of [-512, 504] %d\n",
			       __func__, offset);
			return AARCH64_BREAK_FAULT;
		}
		shift = 3;
		insn |= AARCH64_INSN_SF_BIT;
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn,
					    reg1);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT2, insn,
					    reg2);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
					    base);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_7, insn,
					     offset >> shift);
}
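
/*
 * Illustrative sketch (hypothetical helper): the encoding a JIT-style
 * caller could use for "stp x29, x30, [sp, #-16]!", i.e. pushing a frame
 * record.  A 64-bit pre-index store pair takes the byte offset (-16 here),
 * which the function above scales down into the IMM_7 field.
 */
static u32 __maybe_unused example_gen_push_frame(void)
{
	return aarch64_insn_gen_load_store_pair(AARCH64_INSN_REG_29,
						AARCH64_INSN_REG_30,
						AARCH64_INSN_REG_SP,
						-16,
						AARCH64_INSN_VARIANT_64BIT,
						AARCH64_INSN_LDST_STORE_PAIR_PRE_INDEX);
}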
u32 aarch64_insn_gen_add_sub_imm(enum aarch64_insn_register dst,
				 enum aarch64_insn_register src,
				 int imm, enum aarch64_insn_variant variant,
				 enum aarch64_insn_adsb_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_ADSB_ADD:
		insn = aarch64_insn_get_add_imm_value();
		break;
	case AARCH64_INSN_ADSB_SUB:
		insn = aarch64_insn_get_sub_imm_value();
		break;
	case AARCH64_INSN_ADSB_ADD_SETFLAGS:
		insn = aarch64_insn_get_adds_imm_value();
		break;
	case AARCH64_INSN_ADSB_SUB_SETFLAGS:
		insn = aarch64_insn_get_subs_imm_value();
		break;
	default:
		pr_err("%s: unknown add/sub encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	if (imm & ~(SZ_4K - 1)) {
		pr_err("%s: invalid immediate encoding %d\n", __func__, imm);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_12, insn, imm);
}

u32 aarch64_insn_gen_bitfield(enum aarch64_insn_register dst,
			      enum aarch64_insn_register src,
			      int immr, int imms,
			      enum aarch64_insn_variant variant,
			      enum aarch64_insn_bitfield_type type)
{
	u32 insn;
	u32 mask;

	switch (type) {
	case AARCH64_INSN_BITFIELD_MOVE:
		insn = aarch64_insn_get_bfm_value();
		break;
	case AARCH64_INSN_BITFIELD_MOVE_UNSIGNED:
		insn = aarch64_insn_get_ubfm_value();
		break;
	case AARCH64_INSN_BITFIELD_MOVE_SIGNED:
		insn = aarch64_insn_get_sbfm_value();
		break;
	default:
		pr_err("%s: unknown bitfield encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		mask = GENMASK(4, 0);
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT | AARCH64_INSN_N_BIT;
		mask = GENMASK(5, 0);
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	if (immr & ~mask) {
		pr_err("%s: invalid immr encoding %d\n", __func__, immr);
		return AARCH64_BREAK_FAULT;
	}
	if (imms & ~mask) {
		pr_err("%s: invalid imms encoding %d\n", __func__, imms);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);

	insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_R, insn, immr);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_S, insn, imms);
}
u32 aarch64_insn_gen_movewide(enum aarch64_insn_register dst,
			      int imm, int shift,
			      enum aarch64_insn_variant variant,
			      enum aarch64_insn_movewide_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_MOVEWIDE_ZERO:
		insn = aarch64_insn_get_movz_value();
		break;
	case AARCH64_INSN_MOVEWIDE_KEEP:
		insn = aarch64_insn_get_movk_value();
		break;
	case AARCH64_INSN_MOVEWIDE_INVERSE:
		insn = aarch64_insn_get_movn_value();
		break;
	default:
		pr_err("%s: unknown movewide encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	if (imm & ~(SZ_64K - 1)) {
		pr_err("%s: invalid immediate encoding %d\n", __func__, imm);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		if (shift != 0 && shift != 16) {
			pr_err("%s: invalid shift encoding %d\n", __func__,
			       shift);
			return AARCH64_BREAK_FAULT;
		}
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		if (shift != 0 && shift != 16 && shift != 32 && shift != 48) {
			pr_err("%s: invalid shift encoding %d\n", __func__,
			       shift);
			return AARCH64_BREAK_FAULT;
		}
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	insn |= (shift >> 4) << 21;

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_16, insn, imm);
}
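
/*
 * Illustrative sketch (hypothetical helper): materialise an arbitrary
 * 64-bit constant in @dst with one MOVZ followed by three MOVKs, 16 bits
 * at a time, the way a JIT might.  Emitting the four returned encodings
 * is left to the caller.
 */
static void __maybe_unused example_gen_imm64(enum aarch64_insn_register dst,
					     u64 val, u32 insns[4])
{
	int i;

	insns[0] = aarch64_insn_gen_movewide(dst, val & 0xffff, 0,
					     AARCH64_INSN_VARIANT_64BIT,
					     AARCH64_INSN_MOVEWIDE_ZERO);
	for (i = 1; i < 4; i++)
		insns[i] = aarch64_insn_gen_movewide(dst,
						     (val >> (16 * i)) & 0xffff,
						     16 * i,
						     AARCH64_INSN_VARIANT_64BIT,
						     AARCH64_INSN_MOVEWIDE_KEEP);
}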
u32 aarch64_insn_gen_add_sub_shifted_reg(enum aarch64_insn_register dst,
					 enum aarch64_insn_register src,
					 enum aarch64_insn_register reg,
					 int shift,
					 enum aarch64_insn_variant variant,
					 enum aarch64_insn_adsb_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_ADSB_ADD:
		insn = aarch64_insn_get_add_value();
		break;
	case AARCH64_INSN_ADSB_SUB:
		insn = aarch64_insn_get_sub_value();
		break;
	case AARCH64_INSN_ADSB_ADD_SETFLAGS:
		insn = aarch64_insn_get_adds_value();
		break;
	case AARCH64_INSN_ADSB_SUB_SETFLAGS:
		insn = aarch64_insn_get_subs_value();
		break;
	default:
		pr_err("%s: unknown add/sub encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		if (shift & ~(SZ_32 - 1)) {
			pr_err("%s: invalid shift encoding %d\n", __func__,
			       shift);
			return AARCH64_BREAK_FAULT;
		}
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		if (shift & ~(SZ_64 - 1)) {
			pr_err("%s: invalid shift encoding %d\n", __func__,
			       shift);
			return AARCH64_BREAK_FAULT;
		}
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, reg);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_6, insn, shift);
}
u32 aarch64_insn_gen_data1(enum aarch64_insn_register dst,
			   enum aarch64_insn_register src,
			   enum aarch64_insn_variant variant,
			   enum aarch64_insn_data1_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_DATA1_REVERSE_16:
		insn = aarch64_insn_get_rev16_value();
		break;
	case AARCH64_INSN_DATA1_REVERSE_32:
		insn = aarch64_insn_get_rev32_value();
		break;
	case AARCH64_INSN_DATA1_REVERSE_64:
		if (variant != AARCH64_INSN_VARIANT_64BIT) {
			pr_err("%s: invalid variant for reverse64 %d\n",
			       __func__, variant);
			return AARCH64_BREAK_FAULT;
		}
		insn = aarch64_insn_get_rev64_value();
		break;
	default:
		pr_err("%s: unknown data1 encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);
}

u32 aarch64_insn_gen_data2(enum aarch64_insn_register dst,
			   enum aarch64_insn_register src,
			   enum aarch64_insn_register reg,
			   enum aarch64_insn_variant variant,
			   enum aarch64_insn_data2_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_DATA2_UDIV:
		insn = aarch64_insn_get_udiv_value();
		break;
	case AARCH64_INSN_DATA2_SDIV:
		insn = aarch64_insn_get_sdiv_value();
		break;
	case AARCH64_INSN_DATA2_LSLV:
		insn = aarch64_insn_get_lslv_value();
		break;
	case AARCH64_INSN_DATA2_LSRV:
		insn = aarch64_insn_get_lsrv_value();
		break;
	case AARCH64_INSN_DATA2_ASRV:
		insn = aarch64_insn_get_asrv_value();
		break;
	case AARCH64_INSN_DATA2_RORV:
		insn = aarch64_insn_get_rorv_value();
		break;
	default:
		pr_err("%s: unknown data2 encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);

	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, reg);
}
u32 aarch64_insn_gen_data3(enum aarch64_insn_register dst,
			   enum aarch64_insn_register src,
			   enum aarch64_insn_register reg1,
			   enum aarch64_insn_register reg2,
			   enum aarch64_insn_variant variant,
			   enum aarch64_insn_data3_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_DATA3_MADD:
		insn = aarch64_insn_get_madd_value();
		break;
	case AARCH64_INSN_DATA3_MSUB:
		insn = aarch64_insn_get_msub_value();
		break;
	default:
		pr_err("%s: unknown data3 encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RA, insn, src);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
					    reg1);

	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn,
					    reg2);
}

u32 aarch64_insn_gen_logical_shifted_reg(enum aarch64_insn_register dst,
					 enum aarch64_insn_register src,
					 enum aarch64_insn_register reg,
					 int shift,
					 enum aarch64_insn_variant variant,
					 enum aarch64_insn_logic_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_LOGIC_AND:
		insn = aarch64_insn_get_and_value();
		break;
	case AARCH64_INSN_LOGIC_BIC:
		insn = aarch64_insn_get_bic_value();
		break;
	case AARCH64_INSN_LOGIC_ORR:
		insn = aarch64_insn_get_orr_value();
		break;
	case AARCH64_INSN_LOGIC_ORN:
		insn = aarch64_insn_get_orn_value();
		break;
	case AARCH64_INSN_LOGIC_EOR:
		insn = aarch64_insn_get_eor_value();
		break;
	case AARCH64_INSN_LOGIC_EON:
		insn = aarch64_insn_get_eon_value();
		break;
	case AARCH64_INSN_LOGIC_AND_SETFLAGS:
		insn = aarch64_insn_get_ands_value();
		break;
	case AARCH64_INSN_LOGIC_BIC_SETFLAGS:
		insn = aarch64_insn_get_bics_value();
		break;
	default:
		pr_err("%s: unknown logical encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		if (shift & ~(SZ_32 - 1)) {
			pr_err("%s: invalid shift encoding %d\n", __func__,
			       shift);
			return AARCH64_BREAK_FAULT;
		}
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		if (shift & ~(SZ_64 - 1)) {
			pr_err("%s: invalid shift encoding %d\n", __func__,
			       shift);
			return AARCH64_BREAK_FAULT;
		}
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, reg);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_6, insn, shift);
}
/*
 * Decode the imm field of a branch, and return the byte offset as a
 * signed value (so it can be used when computing a new branch
 * target).
 */
s32 aarch64_get_branch_offset(u32 insn)
{
	s32 imm;

	if (aarch64_insn_is_b(insn) || aarch64_insn_is_bl(insn)) {
		imm = aarch64_insn_decode_immediate(AARCH64_INSN_IMM_26, insn);
		/* Sign-extend the 26-bit field and scale by 4 in one go. */
		return (imm << 6) >> 4;
	}

	if (aarch64_insn_is_cbz(insn) || aarch64_insn_is_cbnz(insn) ||
	    aarch64_insn_is_bcond(insn)) {
		imm = aarch64_insn_decode_immediate(AARCH64_INSN_IMM_19, insn);
		return (imm << 13) >> 11;
	}

	if (aarch64_insn_is_tbz(insn) || aarch64_insn_is_tbnz(insn)) {
		imm = aarch64_insn_decode_immediate(AARCH64_INSN_IMM_14, insn);
		return (imm << 18) >> 16;
	}

	/* Unhandled instruction */
	BUG();
}

/*
 * Encode the displacement of a branch in the imm field and return the
 * updated instruction.
 */
u32 aarch64_set_branch_offset(u32 insn, s32 offset)
{
	if (aarch64_insn_is_b(insn) || aarch64_insn_is_bl(insn))
		return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_26, insn,
						     offset >> 2);

	if (aarch64_insn_is_cbz(insn) || aarch64_insn_is_cbnz(insn) ||
	    aarch64_insn_is_bcond(insn))
		return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_19, insn,
						     offset >> 2);

	if (aarch64_insn_is_tbz(insn) || aarch64_insn_is_tbnz(insn))
		return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_14, insn,
						     offset >> 2);

	/* Unhandled instruction */
	BUG();
}
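
/*
 * Illustrative sketch (hypothetical helper): retarget a copied branch.
 * The branch originally at @old_pc is moved to @new_pc; its absolute
 * target stays the same, so the stored displacement is rebuilt from the
 * decoded one.  This is how the decode/encode pair above is meant to be
 * combined by instruction-copying callers.
 */
static u32 __maybe_unused example_relocate_branch(u32 insn,
						  unsigned long old_pc,
						  unsigned long new_pc)
{
	unsigned long target = old_pc + aarch64_get_branch_offset(insn);

	return aarch64_set_branch_offset(insn, target - new_pc);
}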
s32 aarch64_insn_adrp_get_offset(u32 insn)
{
	BUG_ON(!aarch64_insn_is_adrp(insn));
	return aarch64_insn_decode_immediate(AARCH64_INSN_IMM_ADR, insn) << 12;
}

u32 aarch64_insn_adrp_set_offset(u32 insn, s32 offset)
{
	BUG_ON(!aarch64_insn_is_adrp(insn));
	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_ADR, insn,
					     offset >> 12);
}

/*
 * Extract the Op/CR data from a msr/mrs instruction.
 */
u32 aarch64_insn_extract_system_reg(u32 insn)
{
	return (insn & 0x1FFFE0) >> 5;
}

bool aarch32_insn_is_wide(u32 insn)
{
	return insn >= 0xe800;
}

/*
 * Macros/defines for extracting register numbers from instruction.
 */
u32 aarch32_insn_extract_reg_num(u32 insn, int offset)
{
	return (insn & (0xf << offset)) >> offset;
}

#define OPC2_MASK	0x7
#define OPC2_OFFSET	5
u32 aarch32_insn_mcr_extract_opc2(u32 insn)
{
	return (insn & (OPC2_MASK << OPC2_OFFSET)) >> OPC2_OFFSET;
}

#define CRM_MASK	0xf
u32 aarch32_insn_mcr_extract_crm(u32 insn)
{
	return insn & CRM_MASK;
}
static bool __kprobes __check_eq(unsigned long pstate)
{
	return (pstate & PSR_Z_BIT) != 0;
}

static bool __kprobes __check_ne(unsigned long pstate)
{
	return (pstate & PSR_Z_BIT) == 0;
}

static bool __kprobes __check_cs(unsigned long pstate)
{
	return (pstate & PSR_C_BIT) != 0;
}

static bool __kprobes __check_cc(unsigned long pstate)
{
	return (pstate & PSR_C_BIT) == 0;
}

static bool __kprobes __check_mi(unsigned long pstate)
{
	return (pstate & PSR_N_BIT) != 0;
}

static bool __kprobes __check_pl(unsigned long pstate)
{
	return (pstate & PSR_N_BIT) == 0;
}

static bool __kprobes __check_vs(unsigned long pstate)
{
	return (pstate & PSR_V_BIT) != 0;
}

static bool __kprobes __check_vc(unsigned long pstate)
{
	return (pstate & PSR_V_BIT) == 0;
}

static bool __kprobes __check_hi(unsigned long pstate)
{
	pstate &= ~(pstate >> 1);	/* PSR_C_BIT &= ~PSR_Z_BIT */
	return (pstate & PSR_C_BIT) != 0;
}

static bool __kprobes __check_ls(unsigned long pstate)
{
	pstate &= ~(pstate >> 1);	/* PSR_C_BIT &= ~PSR_Z_BIT */
	return (pstate & PSR_C_BIT) == 0;
}

static bool __kprobes __check_ge(unsigned long pstate)
{
	pstate ^= (pstate << 3);	/* PSR_N_BIT ^= PSR_V_BIT */
	return (pstate & PSR_N_BIT) == 0;
}

static bool __kprobes __check_lt(unsigned long pstate)
{
	pstate ^= (pstate << 3);	/* PSR_N_BIT ^= PSR_V_BIT */
	return (pstate & PSR_N_BIT) != 0;
}

static bool __kprobes __check_gt(unsigned long pstate)
{
	/* PSR_N_BIT ^= PSR_V_BIT */
	unsigned long temp = pstate ^ (pstate << 3);

	temp |= (pstate << 1);		/* PSR_N_BIT |= PSR_Z_BIT */
	return (temp & PSR_N_BIT) == 0;
}

static bool __kprobes __check_le(unsigned long pstate)
{
	/* PSR_N_BIT ^= PSR_V_BIT */
	unsigned long temp = pstate ^ (pstate << 3);

	temp |= (pstate << 1);		/* PSR_N_BIT |= PSR_Z_BIT */
	return (temp & PSR_N_BIT) != 0;
}

static bool __kprobes __check_al(unsigned long pstate)
{
	return true;
}

/*
 * Note that the ARMv8 ARM calls condition code 0b1111 "nv", but states that
 * it behaves identically to 0b1110 ("al").
 */
pstate_check_t * const aarch32_opcode_cond_checks[16] = {
	__check_eq, __check_ne, __check_cs, __check_cc,
	__check_mi, __check_pl, __check_vs, __check_vc,
	__check_hi, __check_ls, __check_ge, __check_lt,
	__check_gt, __check_le, __check_al, __check_al
};
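
/*
 * Illustrative sketch (hypothetical helper): decide whether an instruction
 * whose condition field is @cond (bits [31:28] of an AArch32 encoding)
 * would execute under the flags held in @pstate, by indexing the table
 * above.
 */
static bool __maybe_unused example_cond_passes(u32 cond, unsigned long pstate)
{
	return aarch32_opcode_cond_checks[cond & 0xf](pstate);
}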