]> git.karo-electronics.de Git - karo-tx-linux.git/blob - arch/arm64/kernel/insn.c
Merge tag 'clk-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git...
[karo-tx-linux.git] / arch / arm64 / kernel / insn.c
1 /*
2  * Copyright (C) 2013 Huawei Ltd.
3  * Author: Jiang Liu <liuj97@gmail.com>
4  *
5  * Copyright (C) 2014-2016 Zi Shen Lim <zlim.lnx@gmail.com>
6  *
7  * This program is free software; you can redistribute it and/or modify
8  * it under the terms of the GNU General Public License version 2 as
9  * published by the Free Software Foundation.
10  *
11  * This program is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  * GNU General Public License for more details.
15  *
16  * You should have received a copy of the GNU General Public License
17  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
18  */
19 #include <linux/bitops.h>
20 #include <linux/bug.h>
21 #include <linux/compiler.h>
22 #include <linux/kernel.h>
23 #include <linux/mm.h>
24 #include <linux/smp.h>
25 #include <linux/spinlock.h>
26 #include <linux/stop_machine.h>
27 #include <linux/types.h>
28 #include <linux/uaccess.h>
29
30 #include <asm/cacheflush.h>
31 #include <asm/debug-monitors.h>
32 #include <asm/fixmap.h>
33 #include <asm/insn.h>
34
/* SF (bit 31) selects the 64-bit variant of many data-processing insns */
#define AARCH64_INSN_SF_BIT	BIT(31)
/* N (bit 22) extends immr/imms for the 64-bit bitfield instructions */
#define AARCH64_INSN_N_BIT	BIT(22)
37
38 static int aarch64_insn_encoding_class[] = {
39         AARCH64_INSN_CLS_UNKNOWN,
40         AARCH64_INSN_CLS_UNKNOWN,
41         AARCH64_INSN_CLS_UNKNOWN,
42         AARCH64_INSN_CLS_UNKNOWN,
43         AARCH64_INSN_CLS_LDST,
44         AARCH64_INSN_CLS_DP_REG,
45         AARCH64_INSN_CLS_LDST,
46         AARCH64_INSN_CLS_DP_FPSIMD,
47         AARCH64_INSN_CLS_DP_IMM,
48         AARCH64_INSN_CLS_DP_IMM,
49         AARCH64_INSN_CLS_BR_SYS,
50         AARCH64_INSN_CLS_BR_SYS,
51         AARCH64_INSN_CLS_LDST,
52         AARCH64_INSN_CLS_DP_REG,
53         AARCH64_INSN_CLS_LDST,
54         AARCH64_INSN_CLS_DP_FPSIMD,
55 };
56
57 enum aarch64_insn_encoding_class __kprobes aarch64_get_insn_class(u32 insn)
58 {
59         return aarch64_insn_encoding_class[(insn >> 25) & 0xf];
60 }
61
62 /* NOP is an alias of HINT */
63 bool __kprobes aarch64_insn_is_nop(u32 insn)
64 {
65         if (!aarch64_insn_is_hint(insn))
66                 return false;
67
68         switch (insn & 0xFE0) {
69         case AARCH64_INSN_HINT_YIELD:
70         case AARCH64_INSN_HINT_WFE:
71         case AARCH64_INSN_HINT_WFI:
72         case AARCH64_INSN_HINT_SEV:
73         case AARCH64_INSN_HINT_SEVL:
74                 return false;
75         default:
76                 return true;
77         }
78 }
79
80 bool aarch64_insn_is_branch_imm(u32 insn)
81 {
82         return (aarch64_insn_is_b(insn) || aarch64_insn_is_bl(insn) ||
83                 aarch64_insn_is_tbz(insn) || aarch64_insn_is_tbnz(insn) ||
84                 aarch64_insn_is_cbz(insn) || aarch64_insn_is_cbnz(insn) ||
85                 aarch64_insn_is_bcond(insn));
86 }
87
/* Serializes text patching through the FIX_TEXT_POKE0 fixmap slot */
static DEFINE_RAW_SPINLOCK(patch_lock);
89
/*
 * Return a writable alias of @addr for text patching.
 *
 * Core kernel text is linearly mapped but may be read-only; module text
 * lives in vmalloc space. Find the backing page and install a temporary
 * writable mapping in the @fixmap slot (caller must hold patch_lock).
 * Module addresses are returned unchanged when
 * CONFIG_DEBUG_SET_MODULE_RONX is off, since they are then writable
 * in place.
 */
static void __kprobes *patch_map(void *addr, int fixmap)
{
	unsigned long uintaddr = (uintptr_t) addr;
	bool module = !core_kernel_text(uintaddr);
	struct page *page;

	if (module && IS_ENABLED(CONFIG_DEBUG_SET_MODULE_RONX))
		page = vmalloc_to_page(addr);
	else if (!module)
		page = pfn_to_page(PHYS_PFN(__pa(addr)));
	else
		return addr;

	BUG_ON(!page);
	/* Keep the original sub-page offset within the fixmap alias. */
	return (void *)set_fixmap_offset(fixmap, page_to_phys(page) +
			(uintaddr & ~PAGE_MASK));
}
107
/* Tear down the temporary writable mapping created by patch_map(). */
static void __kprobes patch_unmap(int fixmap)
{
	clear_fixmap(fixmap);
}
112 /*
113  * In ARMv8-A, A64 instructions have a fixed length of 32 bits and are always
114  * little-endian.
115  */
116 int __kprobes aarch64_insn_read(void *addr, u32 *insnp)
117 {
118         int ret;
119         u32 val;
120
121         ret = probe_kernel_read(&val, addr, AARCH64_INSN_SIZE);
122         if (!ret)
123                 *insnp = le32_to_cpu(val);
124
125         return ret;
126 }
127
/*
 * Write the raw (already little-endian) instruction word @insn to @addr
 * through a temporary writable alias. patch_lock serializes use of the
 * FIX_TEXT_POKE0 fixmap slot; interrupts are disabled for the duration
 * so the mapping cannot be re-entered on this CPU.
 */
static int __kprobes __aarch64_insn_write(void *addr, u32 insn)
{
	void *waddr = addr;
	unsigned long flags = 0;
	int ret;

	raw_spin_lock_irqsave(&patch_lock, flags);
	waddr = patch_map(addr, FIX_TEXT_POKE0);

	ret = probe_kernel_write(waddr, &insn, AARCH64_INSN_SIZE);

	patch_unmap(FIX_TEXT_POKE0);
	raw_spin_unlock_irqrestore(&patch_lock, flags);

	return ret;
}
144
/*
 * Write instruction @insn (CPU byte order) to @addr. A64 instructions
 * are stored little-endian, so convert before writing.
 *
 * NOTE(review): the __le32 result is deliberately carried in a plain
 * u32 here — __aarch64_insn_write() treats it as an opaque byte image.
 */
int __kprobes aarch64_insn_write(void *addr, u32 insn)
{
	insn = cpu_to_le32(insn);
	return __aarch64_insn_write(addr, insn);
}
150
151 static bool __kprobes __aarch64_insn_hotpatch_safe(u32 insn)
152 {
153         if (aarch64_get_insn_class(insn) != AARCH64_INSN_CLS_BR_SYS)
154                 return false;
155
156         return  aarch64_insn_is_b(insn) ||
157                 aarch64_insn_is_bl(insn) ||
158                 aarch64_insn_is_svc(insn) ||
159                 aarch64_insn_is_hvc(insn) ||
160                 aarch64_insn_is_smc(insn) ||
161                 aarch64_insn_is_brk(insn) ||
162                 aarch64_insn_is_nop(insn);
163 }
164
165 bool __kprobes aarch64_insn_uses_literal(u32 insn)
166 {
167         /* ldr/ldrsw (literal), prfm */
168
169         return aarch64_insn_is_ldr_lit(insn) ||
170                 aarch64_insn_is_ldrsw_lit(insn) ||
171                 aarch64_insn_is_adr_adrp(insn) ||
172                 aarch64_insn_is_prfm_lit(insn);
173 }
174
/* True for any branch instruction, immediate or register target. */
bool __kprobes aarch64_insn_is_branch(u32 insn)
{
	/* b, bl, cb*, tb*, b.cond, br, blr, ret */

	return aarch64_insn_is_b(insn) ||
		aarch64_insn_is_bl(insn) ||
		aarch64_insn_is_cbz(insn) ||
		aarch64_insn_is_cbnz(insn) ||
		aarch64_insn_is_tbz(insn) ||
		aarch64_insn_is_tbnz(insn) ||
		aarch64_insn_is_ret(insn) ||
		aarch64_insn_is_br(insn) ||
		aarch64_insn_is_blr(insn) ||
		aarch64_insn_is_bcond(insn);
}
190
/*
 * ARM Architecture Reference Manual for ARMv8 Profile-A, Issue A.a
 * Section B2.6.5 "Concurrent modification and execution of instructions":
 * Concurrent modification and execution of instructions can lead to the
 * resulting instruction performing any behavior that can be achieved by
 * executing any sequence of instructions that can be executed from the
 * same Exception level, except where the instruction before modification
 * and the instruction after modification is a B, BL, NOP, BKPT, SVC, HVC,
 * or SMC instruction.
 *
 * Both the old and the new instruction must be in the safe set for a
 * live (no stop_machine) patch to be architecturally valid.
 */
bool __kprobes aarch64_insn_hotpatch_safe(u32 old_insn, u32 new_insn)
{
	return __aarch64_insn_hotpatch_safe(old_insn) &&
	       __aarch64_insn_hotpatch_safe(new_insn);
}
206
207 int __kprobes aarch64_insn_patch_text_nosync(void *addr, u32 insn)
208 {
209         u32 *tp = addr;
210         int ret;
211
212         /* A64 instructions must be word aligned */
213         if ((uintptr_t)tp & 0x3)
214                 return -EINVAL;
215
216         ret = aarch64_insn_write(tp, insn);
217         if (ret == 0)
218                 flush_icache_range((uintptr_t)tp,
219                                    (uintptr_t)tp + AARCH64_INSN_SIZE);
220
221         return ret;
222 }
223
/* Work descriptor handed to the stop_machine() patching callback. */
struct aarch64_insn_patch {
	void		**text_addrs;	/* patch sites */
	u32		*new_insns;	/* replacement instructions */
	int		insn_cnt;	/* number of entries above */
	atomic_t	cpu_count;	/* arrival/completion counter */
};
230
/*
 * stop_machine() callback: the first CPU to arrive applies all patches,
 * the others spin until it signals completion, then resynchronize their
 * pipelines with an ISB.
 */
static int __kprobes aarch64_insn_patch_text_cb(void *arg)
{
	int i, ret = 0;
	struct aarch64_insn_patch *pp = arg;

	/* The first CPU becomes master */
	if (atomic_inc_return(&pp->cpu_count) == 1) {
		for (i = 0; ret == 0 && i < pp->insn_cnt; i++)
			ret = aarch64_insn_patch_text_nosync(pp->text_addrs[i],
							     pp->new_insns[i]);
		/*
		 * aarch64_insn_patch_text_nosync() calls flush_icache_range(),
		 * which ends with "dsb; isb" pair guaranteeing global
		 * visibility.
		 */
		/* Notify other processors with an additional increment. */
		atomic_inc(&pp->cpu_count);
	} else {
		/*
		 * Wait for the master's extra increment: the count exceeds
		 * num_online_cpus() only after all patching is done.
		 */
		while (atomic_read(&pp->cpu_count) <= num_online_cpus())
			cpu_relax();
		isb();
	}

	return ret;
}
256
257 int __kprobes aarch64_insn_patch_text_sync(void *addrs[], u32 insns[], int cnt)
258 {
259         struct aarch64_insn_patch patch = {
260                 .text_addrs = addrs,
261                 .new_insns = insns,
262                 .insn_cnt = cnt,
263                 .cpu_count = ATOMIC_INIT(0),
264         };
265
266         if (cnt <= 0)
267                 return -EINVAL;
268
269         return stop_machine(aarch64_insn_patch_text_cb, &patch,
270                             cpu_online_mask);
271 }
272
/*
 * Patch kernel text, choosing the cheapest safe mechanism: a single
 * hotpatch-safe replacement is done live (plus IPIs for pipeline
 * resync); anything else falls back to stop_machine()-based patching.
 */
int __kprobes aarch64_insn_patch_text(void *addrs[], u32 insns[], int cnt)
{
	int ret;
	u32 insn;

	/* Unsafe to patch multiple instructions without synchronization */
	if (cnt == 1) {
		ret = aarch64_insn_read(addrs[0], &insn);
		if (ret)
			return ret;

		if (aarch64_insn_hotpatch_safe(insn, insns[0])) {
			/*
			 * ARMv8 architecture doesn't guarantee all CPUs see
			 * the new instruction after returning from function
			 * aarch64_insn_patch_text_nosync(). So send IPIs to
			 * all other CPUs to achieve instruction
			 * synchronization.
			 */
			ret = aarch64_insn_patch_text_nosync(addrs[0], insns[0]);
			kick_all_cpus_sync();
			return ret;
		}
	}

	return aarch64_insn_patch_text_sync(addrs, insns, cnt);
}
300
/*
 * Look up the width (as a value mask) and bit position (shift) of
 * immediate field @type within an A64 instruction word.
 * Returns 0 on success, -EINVAL for unknown immediate types.
 */
static int __kprobes aarch64_get_imm_shift_mask(enum aarch64_insn_imm_type type,
						u32 *maskp, int *shiftp)
{
	u32 mask;
	int shift;

	switch (type) {
	case AARCH64_INSN_IMM_26:
		/* imm26 at [25:0] — b/bl offset */
		mask = BIT(26) - 1;
		shift = 0;
		break;
	case AARCH64_INSN_IMM_19:
		/* imm19 at [23:5] — cbz/cbnz/b.cond offset, literals */
		mask = BIT(19) - 1;
		shift = 5;
		break;
	case AARCH64_INSN_IMM_16:
		/* imm16 at [20:5] — movz/movk/movn payload */
		mask = BIT(16) - 1;
		shift = 5;
		break;
	case AARCH64_INSN_IMM_14:
		/* imm14 at [18:5] */
		mask = BIT(14) - 1;
		shift = 5;
		break;
	case AARCH64_INSN_IMM_12:
		/* imm12 at [21:10] — add/sub immediate */
		mask = BIT(12) - 1;
		shift = 10;
		break;
	case AARCH64_INSN_IMM_9:
		/* imm9 at [20:12] */
		mask = BIT(9) - 1;
		shift = 12;
		break;
	case AARCH64_INSN_IMM_7:
		/* imm7 at [21:15] — ldp/stp scaled offset */
		mask = BIT(7) - 1;
		shift = 15;
		break;
	case AARCH64_INSN_IMM_6:
	case AARCH64_INSN_IMM_S:
		/* imms at [15:10] — bitfield instructions */
		mask = BIT(6) - 1;
		shift = 10;
		break;
	case AARCH64_INSN_IMM_R:
		/* immr at [21:16] — bitfield instructions */
		mask = BIT(6) - 1;
		shift = 16;
		break;
	default:
		return -EINVAL;
	}

	*maskp = mask;
	*shiftp = shift;

	return 0;
}
354
/*
 * ADR/ADRP split their 21-bit immediate into a 2-bit immlo field at
 * [30:29] and a 19-bit immhi field at [23:5]; the macros below describe
 * that split for the (de)coders.
 */
#define ADR_IMM_HILOSPLIT	2
#define ADR_IMM_SIZE		SZ_2M
#define ADR_IMM_LOMASK		((1 << ADR_IMM_HILOSPLIT) - 1)
#define ADR_IMM_HIMASK		((ADR_IMM_SIZE >> ADR_IMM_HILOSPLIT) - 1)
#define ADR_IMM_LOSHIFT		29
#define ADR_IMM_HISHIFT		5
361
/*
 * Extract the immediate field of type @type from @insn.
 * ADR/ADRP immediates are reassembled from their split immlo/immhi
 * fields. Returns 0 (and logs an error) for unknown immediate types.
 */
u64 aarch64_insn_decode_immediate(enum aarch64_insn_imm_type type, u32 insn)
{
	u32 immlo, immhi, mask;
	int shift;

	switch (type) {
	case AARCH64_INSN_IMM_ADR:
		shift = 0;
		immlo = (insn >> ADR_IMM_LOSHIFT) & ADR_IMM_LOMASK;
		immhi = (insn >> ADR_IMM_HISHIFT) & ADR_IMM_HIMASK;
		/* Reuse insn to hold the recombined 21-bit immediate. */
		insn = (immhi << ADR_IMM_HILOSPLIT) | immlo;
		mask = ADR_IMM_SIZE - 1;
		break;
	default:
		if (aarch64_get_imm_shift_mask(type, &mask, &shift) < 0) {
			pr_err("aarch64_insn_decode_immediate: unknown immediate encoding %d\n",
			       type);
			return 0;
		}
	}

	return (insn >> shift) & mask;
}
385
/*
 * Insert @imm into the immediate field of type @type in @insn.
 * Returns the updated instruction, or AARCH64_BREAK_FAULT on an unknown
 * type. An incoming AARCH64_BREAK_FAULT is passed through unchanged so
 * errors propagate cleanly through chained encode calls.
 */
u32 __kprobes aarch64_insn_encode_immediate(enum aarch64_insn_imm_type type,
				  u32 insn, u64 imm)
{
	u32 immlo, immhi, mask;
	int shift;

	if (insn == AARCH64_BREAK_FAULT)
		return AARCH64_BREAK_FAULT;

	switch (type) {
	case AARCH64_INSN_IMM_ADR:
		shift = 0;
		/* Split imm into the in-place immlo/immhi fields. */
		immlo = (imm & ADR_IMM_LOMASK) << ADR_IMM_LOSHIFT;
		imm >>= ADR_IMM_HILOSPLIT;
		immhi = (imm & ADR_IMM_HIMASK) << ADR_IMM_HISHIFT;
		imm = immlo | immhi;
		mask = ((ADR_IMM_LOMASK << ADR_IMM_LOSHIFT) |
			(ADR_IMM_HIMASK << ADR_IMM_HISHIFT));
		break;
	default:
		if (aarch64_get_imm_shift_mask(type, &mask, &shift) < 0) {
			pr_err("aarch64_insn_encode_immediate: unknown immediate encoding %d\n",
			       type);
			return AARCH64_BREAK_FAULT;
		}
	}

	/* Update the immediate field. */
	insn &= ~(mask << shift);
	insn |= (imm & mask) << shift;

	return insn;
}
419
/*
 * Encode register @reg into the @type register field of @insn.
 * Returns the updated instruction, or AARCH64_BREAK_FAULT for an
 * out-of-range register or unknown field type; an incoming
 * AARCH64_BREAK_FAULT is passed through so encode chains fail safely.
 */
static u32 aarch64_insn_encode_register(enum aarch64_insn_register_type type,
					u32 insn,
					enum aarch64_insn_register reg)
{
	int shift;

	if (insn == AARCH64_BREAK_FAULT)
		return AARCH64_BREAK_FAULT;

	if (reg < AARCH64_INSN_REG_0 || reg > AARCH64_INSN_REG_SP) {
		pr_err("%s: unknown register encoding %d\n", __func__, reg);
		return AARCH64_BREAK_FAULT;
	}

	/* All register fields are 5 bits; only their position differs. */
	switch (type) {
	case AARCH64_INSN_REGTYPE_RT:
	case AARCH64_INSN_REGTYPE_RD:
		shift = 0;		/* Rt/Rd at [4:0] */
		break;
	case AARCH64_INSN_REGTYPE_RN:
		shift = 5;		/* Rn at [9:5] */
		break;
	case AARCH64_INSN_REGTYPE_RT2:
	case AARCH64_INSN_REGTYPE_RA:
		shift = 10;		/* Rt2/Ra at [14:10] */
		break;
	case AARCH64_INSN_REGTYPE_RM:
		shift = 16;		/* Rm at [20:16] */
		break;
	default:
		pr_err("%s: unknown register type encoding %d\n", __func__,
		       type);
		return AARCH64_BREAK_FAULT;
	}

	insn &= ~(GENMASK(4, 0) << shift);
	insn |= reg << shift;

	return insn;
}
460
/*
 * Encode the load/store access size @type into the size field
 * (bits [31:30]) of @insn. Returns AARCH64_BREAK_FAULT for an
 * unknown size.
 */
static u32 aarch64_insn_encode_ldst_size(enum aarch64_insn_size_type type,
					 u32 insn)
{
	u32 size;

	switch (type) {
	case AARCH64_INSN_SIZE_8:
		size = 0;
		break;
	case AARCH64_INSN_SIZE_16:
		size = 1;
		break;
	case AARCH64_INSN_SIZE_32:
		size = 2;
		break;
	case AARCH64_INSN_SIZE_64:
		size = 3;
		break;
	default:
		pr_err("%s: unknown size encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	insn &= ~GENMASK(31, 30);
	insn |= size << 30;

	return insn;
}
489
/*
 * Compute the byte offset from @pc to @addr for an immediate branch
 * with reach [-range, range).
 *
 * NOTE: @range itself is used as the error sentinel (it can never be a
 * valid offset, since valid offsets are strictly below it); callers
 * must reject results >= range before encoding.
 */
static inline long branch_imm_common(unsigned long pc, unsigned long addr,
				     long range)
{
	long offset;

	if ((pc & 0x3) || (addr & 0x3)) {
		pr_err("%s: A64 instructions must be word aligned\n", __func__);
		return range;
	}

	offset = ((long)addr - (long)pc);

	if (offset < -range || offset >= range) {
		pr_err("%s: offset out of range\n", __func__);
		return range;
	}

	return offset;
}
509
/*
 * Generate a b or bl from @pc to @addr.
 * Returns the encoded instruction, or AARCH64_BREAK_FAULT if the
 * target is misaligned/out of reach or @type is unknown.
 */
u32 __kprobes aarch64_insn_gen_branch_imm(unsigned long pc, unsigned long addr,
					  enum aarch64_insn_branch_type type)
{
	u32 insn;
	long offset;

	/*
	 * B/BL support [-128M, 128M) offset
	 * ARM64 virtual address arrangement guarantees all kernel and module
	 * texts are within +/-128M.
	 */
	offset = branch_imm_common(pc, addr, SZ_128M);
	if (offset >= SZ_128M)
		return AARCH64_BREAK_FAULT;

	switch (type) {
	case AARCH64_INSN_BRANCH_LINK:
		insn = aarch64_insn_get_bl_value();
		break;
	case AARCH64_INSN_BRANCH_NOLINK:
		insn = aarch64_insn_get_b_value();
		break;
	default:
		pr_err("%s: unknown branch encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	/* imm26 holds the word (offset / 4) displacement. */
	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_26, insn,
					     offset >> 2);
}
540
/*
 * Generate a cbz/cbnz testing @reg with target @addr.
 * The 19-bit immediate limits the reach to [-1M, 1M) bytes from @pc.
 * Returns AARCH64_BREAK_FAULT on any invalid input.
 */
u32 aarch64_insn_gen_comp_branch_imm(unsigned long pc, unsigned long addr,
				     enum aarch64_insn_register reg,
				     enum aarch64_insn_variant variant,
				     enum aarch64_insn_branch_type type)
{
	u32 insn;
	long offset;

	offset = branch_imm_common(pc, addr, SZ_1M);
	if (offset >= SZ_1M)
		return AARCH64_BREAK_FAULT;

	switch (type) {
	case AARCH64_INSN_BRANCH_COMP_ZERO:
		insn = aarch64_insn_get_cbz_value();
		break;
	case AARCH64_INSN_BRANCH_COMP_NONZERO:
		insn = aarch64_insn_get_cbnz_value();
		break;
	default:
		pr_err("%s: unknown branch encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		/* sf bit selects the 64-bit register form. */
		insn |= AARCH64_INSN_SF_BIT;
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn, reg);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_19, insn,
					     offset >> 2);
}
581
582 u32 aarch64_insn_gen_cond_branch_imm(unsigned long pc, unsigned long addr,
583                                      enum aarch64_insn_condition cond)
584 {
585         u32 insn;
586         long offset;
587
588         offset = branch_imm_common(pc, addr, SZ_1M);
589
590         insn = aarch64_insn_get_bcond_value();
591
592         if (cond < AARCH64_INSN_COND_EQ || cond > AARCH64_INSN_COND_AL) {
593                 pr_err("%s: unknown condition encoding %d\n", __func__, cond);
594                 return AARCH64_BREAK_FAULT;
595         }
596         insn |= cond;
597
598         return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_19, insn,
599                                              offset >> 2);
600 }
601
/* Generate a HINT instruction carrying hint operand @op. */
u32 __kprobes aarch64_insn_gen_hint(enum aarch64_insn_hint_op op)
{
	return aarch64_insn_get_hint_value() | op;
}
606
/* Generate a NOP (encoded as HINT #0). */
u32 __kprobes aarch64_insn_gen_nop(void)
{
	return aarch64_insn_gen_hint(AARCH64_INSN_HINT_NOP);
}
611
612 u32 aarch64_insn_gen_branch_reg(enum aarch64_insn_register reg,
613                                 enum aarch64_insn_branch_type type)
614 {
615         u32 insn;
616
617         switch (type) {
618         case AARCH64_INSN_BRANCH_NOLINK:
619                 insn = aarch64_insn_get_br_value();
620                 break;
621         case AARCH64_INSN_BRANCH_LINK:
622                 insn = aarch64_insn_get_blr_value();
623                 break;
624         case AARCH64_INSN_BRANCH_RETURN:
625                 insn = aarch64_insn_get_ret_value();
626                 break;
627         default:
628                 pr_err("%s: unknown branch encoding %d\n", __func__, type);
629                 return AARCH64_BREAK_FAULT;
630         }
631
632         return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, reg);
633 }
634
/*
 * Generate a register-offset load/store:
 * ldr/str @reg, [@base, @offset], with access width @size.
 * Returns AARCH64_BREAK_FAULT on invalid input.
 */
u32 aarch64_insn_gen_load_store_reg(enum aarch64_insn_register reg,
				    enum aarch64_insn_register base,
				    enum aarch64_insn_register offset,
				    enum aarch64_insn_size_type size,
				    enum aarch64_insn_ldst_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_LDST_LOAD_REG_OFFSET:
		insn = aarch64_insn_get_ldr_reg_value();
		break;
	case AARCH64_INSN_LDST_STORE_REG_OFFSET:
		insn = aarch64_insn_get_str_reg_value();
		break;
	default:
		pr_err("%s: unknown load/store encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_ldst_size(size, insn);

	/* Rt = data register, Rn = base address, Rm = index register. */
	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn, reg);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
					    base);

	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn,
					    offset);
}
665
/*
 * Generate a pre-/post-indexed load/store pair:
 * ldp/stp @reg1, @reg2, [@base], with writeback immediate @offset.
 * The imm7 field is scaled by the access size, so the byte offset must
 * be a multiple of 4 (32-bit) or 8 (64-bit) within the imm7 reach.
 * Returns AARCH64_BREAK_FAULT on invalid input.
 */
u32 aarch64_insn_gen_load_store_pair(enum aarch64_insn_register reg1,
				     enum aarch64_insn_register reg2,
				     enum aarch64_insn_register base,
				     int offset,
				     enum aarch64_insn_variant variant,
				     enum aarch64_insn_ldst_type type)
{
	u32 insn;
	int shift;

	switch (type) {
	case AARCH64_INSN_LDST_LOAD_PAIR_PRE_INDEX:
		insn = aarch64_insn_get_ldp_pre_value();
		break;
	case AARCH64_INSN_LDST_STORE_PAIR_PRE_INDEX:
		insn = aarch64_insn_get_stp_pre_value();
		break;
	case AARCH64_INSN_LDST_LOAD_PAIR_POST_INDEX:
		insn = aarch64_insn_get_ldp_post_value();
		break;
	case AARCH64_INSN_LDST_STORE_PAIR_POST_INDEX:
		insn = aarch64_insn_get_stp_post_value();
		break;
	default:
		pr_err("%s: unknown load/store encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		if ((offset & 0x3) || (offset < -256) || (offset > 252)) {
			pr_err("%s: offset must be multiples of 4 in the range of [-256, 252] %d\n",
			       __func__, offset);
			return AARCH64_BREAK_FAULT;
		}
		shift = 2;	/* imm7 counts 4-byte units */
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		if ((offset & 0x7) || (offset < -512) || (offset > 504)) {
			pr_err("%s: offset must be multiples of 8 in the range of [-512, 504] %d\n",
			       __func__, offset);
			return AARCH64_BREAK_FAULT;
		}
		shift = 3;	/* imm7 counts 8-byte units */
		insn |= AARCH64_INSN_SF_BIT;
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn,
					    reg1);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT2, insn,
					    reg2);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
					    base);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_7, insn,
					     offset >> shift);
}
729
/*
 * Generate add/sub/adds/subs @dst, @src, #@imm.
 * @imm must fit the unshifted 12-bit immediate field ([0, 4095]).
 * Returns AARCH64_BREAK_FAULT on invalid input.
 */
u32 aarch64_insn_gen_add_sub_imm(enum aarch64_insn_register dst,
				 enum aarch64_insn_register src,
				 int imm, enum aarch64_insn_variant variant,
				 enum aarch64_insn_adsb_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_ADSB_ADD:
		insn = aarch64_insn_get_add_imm_value();
		break;
	case AARCH64_INSN_ADSB_SUB:
		insn = aarch64_insn_get_sub_imm_value();
		break;
	case AARCH64_INSN_ADSB_ADD_SETFLAGS:
		insn = aarch64_insn_get_adds_imm_value();
		break;
	case AARCH64_INSN_ADSB_SUB_SETFLAGS:
		insn = aarch64_insn_get_subs_imm_value();
		break;
	default:
		pr_err("%s: unknown add/sub encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	/* Reject anything that does not fit in the imm12 field. */
	if (imm & ~(SZ_4K - 1)) {
		pr_err("%s: invalid immediate encoding %d\n", __func__, imm);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_12, insn, imm);
}
777
/*
 * Generate a bitfield move (bfm/ubfm/sbfm) @dst, @src, #@immr, #@imms.
 * The rotate (immr) and width (imms) operands are limited to 5 bits for
 * the 32-bit variant and 6 bits (with the N bit set) for 64-bit.
 * Returns AARCH64_BREAK_FAULT on invalid input.
 */
u32 aarch64_insn_gen_bitfield(enum aarch64_insn_register dst,
			      enum aarch64_insn_register src,
			      int immr, int imms,
			      enum aarch64_insn_variant variant,
			      enum aarch64_insn_bitfield_type type)
{
	u32 insn;
	u32 mask;

	switch (type) {
	case AARCH64_INSN_BITFIELD_MOVE:
		insn = aarch64_insn_get_bfm_value();
		break;
	case AARCH64_INSN_BITFIELD_MOVE_UNSIGNED:
		insn = aarch64_insn_get_ubfm_value();
		break;
	case AARCH64_INSN_BITFIELD_MOVE_SIGNED:
		insn = aarch64_insn_get_sbfm_value();
		break;
	default:
		pr_err("%s: unknown bitfield encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		mask = GENMASK(4, 0);
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		/* 64-bit form needs both sf and N set. */
		insn |= AARCH64_INSN_SF_BIT | AARCH64_INSN_N_BIT;
		mask = GENMASK(5, 0);
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	if (immr & ~mask) {
		pr_err("%s: invalid immr encoding %d\n", __func__, immr);
		return AARCH64_BREAK_FAULT;
	}
	if (imms & ~mask) {
		pr_err("%s: invalid imms encoding %d\n", __func__, imms);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);

	insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_R, insn, immr);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_S, insn, imms);
}
832
/*
 * Generate movz/movk/movn @dst, #@imm, lsl #@shift.
 * @imm must be a 16-bit value; @shift must be a multiple of 16 valid
 * for the variant (0/16 for 32-bit, 0/16/32/48 for 64-bit).
 * Returns AARCH64_BREAK_FAULT on invalid input.
 */
u32 aarch64_insn_gen_movewide(enum aarch64_insn_register dst,
			      int imm, int shift,
			      enum aarch64_insn_variant variant,
			      enum aarch64_insn_movewide_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_MOVEWIDE_ZERO:
		insn = aarch64_insn_get_movz_value();
		break;
	case AARCH64_INSN_MOVEWIDE_KEEP:
		insn = aarch64_insn_get_movk_value();
		break;
	case AARCH64_INSN_MOVEWIDE_INVERSE:
		insn = aarch64_insn_get_movn_value();
		break;
	default:
		pr_err("%s: unknown movewide encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	/* The moved value must fit the 16-bit immediate field. */
	if (imm & ~(SZ_64K - 1)) {
		pr_err("%s: invalid immediate encoding %d\n", __func__, imm);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		if (shift != 0 && shift != 16) {
			pr_err("%s: invalid shift encoding %d\n", __func__,
			       shift);
			return AARCH64_BREAK_FAULT;
		}
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		if (shift != 0 && shift != 16 && shift != 32 && shift != 48) {
			pr_err("%s: invalid shift encoding %d\n", __func__,
			       shift);
			return AARCH64_BREAK_FAULT;
		}
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	/* Encode shift/16 in the 2-bit hw field at bits [22:21]. */
	insn |= (shift >> 4) << 21;

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_16, insn, imm);
}
887
888 u32 aarch64_insn_gen_add_sub_shifted_reg(enum aarch64_insn_register dst,
889                                          enum aarch64_insn_register src,
890                                          enum aarch64_insn_register reg,
891                                          int shift,
892                                          enum aarch64_insn_variant variant,
893                                          enum aarch64_insn_adsb_type type)
894 {
895         u32 insn;
896
897         switch (type) {
898         case AARCH64_INSN_ADSB_ADD:
899                 insn = aarch64_insn_get_add_value();
900                 break;
901         case AARCH64_INSN_ADSB_SUB:
902                 insn = aarch64_insn_get_sub_value();
903                 break;
904         case AARCH64_INSN_ADSB_ADD_SETFLAGS:
905                 insn = aarch64_insn_get_adds_value();
906                 break;
907         case AARCH64_INSN_ADSB_SUB_SETFLAGS:
908                 insn = aarch64_insn_get_subs_value();
909                 break;
910         default:
911                 pr_err("%s: unknown add/sub encoding %d\n", __func__, type);
912                 return AARCH64_BREAK_FAULT;
913         }
914
915         switch (variant) {
916         case AARCH64_INSN_VARIANT_32BIT:
917                 if (shift & ~(SZ_32 - 1)) {
918                         pr_err("%s: invalid shift encoding %d\n", __func__,
919                                shift);
920                         return AARCH64_BREAK_FAULT;
921                 }
922                 break;
923         case AARCH64_INSN_VARIANT_64BIT:
924                 insn |= AARCH64_INSN_SF_BIT;
925                 if (shift & ~(SZ_64 - 1)) {
926                         pr_err("%s: invalid shift encoding %d\n", __func__,
927                                shift);
928                         return AARCH64_BREAK_FAULT;
929                 }
930                 break;
931         default:
932                 pr_err("%s: unknown variant encoding %d\n", __func__, variant);
933                 return AARCH64_BREAK_FAULT;
934         }
935
936
937         insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);
938
939         insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);
940
941         insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, reg);
942
943         return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_6, insn, shift);
944 }
945
946 u32 aarch64_insn_gen_data1(enum aarch64_insn_register dst,
947                            enum aarch64_insn_register src,
948                            enum aarch64_insn_variant variant,
949                            enum aarch64_insn_data1_type type)
950 {
951         u32 insn;
952
953         switch (type) {
954         case AARCH64_INSN_DATA1_REVERSE_16:
955                 insn = aarch64_insn_get_rev16_value();
956                 break;
957         case AARCH64_INSN_DATA1_REVERSE_32:
958                 insn = aarch64_insn_get_rev32_value();
959                 break;
960         case AARCH64_INSN_DATA1_REVERSE_64:
961                 if (variant != AARCH64_INSN_VARIANT_64BIT) {
962                         pr_err("%s: invalid variant for reverse64 %d\n",
963                                __func__, variant);
964                         return AARCH64_BREAK_FAULT;
965                 }
966                 insn = aarch64_insn_get_rev64_value();
967                 break;
968         default:
969                 pr_err("%s: unknown data1 encoding %d\n", __func__, type);
970                 return AARCH64_BREAK_FAULT;
971         }
972
973         switch (variant) {
974         case AARCH64_INSN_VARIANT_32BIT:
975                 break;
976         case AARCH64_INSN_VARIANT_64BIT:
977                 insn |= AARCH64_INSN_SF_BIT;
978                 break;
979         default:
980                 pr_err("%s: unknown variant encoding %d\n", __func__, variant);
981                 return AARCH64_BREAK_FAULT;
982         }
983
984         insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);
985
986         return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);
987 }
988
989 u32 aarch64_insn_gen_data2(enum aarch64_insn_register dst,
990                            enum aarch64_insn_register src,
991                            enum aarch64_insn_register reg,
992                            enum aarch64_insn_variant variant,
993                            enum aarch64_insn_data2_type type)
994 {
995         u32 insn;
996
997         switch (type) {
998         case AARCH64_INSN_DATA2_UDIV:
999                 insn = aarch64_insn_get_udiv_value();
1000                 break;
1001         case AARCH64_INSN_DATA2_SDIV:
1002                 insn = aarch64_insn_get_sdiv_value();
1003                 break;
1004         case AARCH64_INSN_DATA2_LSLV:
1005                 insn = aarch64_insn_get_lslv_value();
1006                 break;
1007         case AARCH64_INSN_DATA2_LSRV:
1008                 insn = aarch64_insn_get_lsrv_value();
1009                 break;
1010         case AARCH64_INSN_DATA2_ASRV:
1011                 insn = aarch64_insn_get_asrv_value();
1012                 break;
1013         case AARCH64_INSN_DATA2_RORV:
1014                 insn = aarch64_insn_get_rorv_value();
1015                 break;
1016         default:
1017                 pr_err("%s: unknown data2 encoding %d\n", __func__, type);
1018                 return AARCH64_BREAK_FAULT;
1019         }
1020
1021         switch (variant) {
1022         case AARCH64_INSN_VARIANT_32BIT:
1023                 break;
1024         case AARCH64_INSN_VARIANT_64BIT:
1025                 insn |= AARCH64_INSN_SF_BIT;
1026                 break;
1027         default:
1028                 pr_err("%s: unknown variant encoding %d\n", __func__, variant);
1029                 return AARCH64_BREAK_FAULT;
1030         }
1031
1032         insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);
1033
1034         insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);
1035
1036         return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, reg);
1037 }
1038
1039 u32 aarch64_insn_gen_data3(enum aarch64_insn_register dst,
1040                            enum aarch64_insn_register src,
1041                            enum aarch64_insn_register reg1,
1042                            enum aarch64_insn_register reg2,
1043                            enum aarch64_insn_variant variant,
1044                            enum aarch64_insn_data3_type type)
1045 {
1046         u32 insn;
1047
1048         switch (type) {
1049         case AARCH64_INSN_DATA3_MADD:
1050                 insn = aarch64_insn_get_madd_value();
1051                 break;
1052         case AARCH64_INSN_DATA3_MSUB:
1053                 insn = aarch64_insn_get_msub_value();
1054                 break;
1055         default:
1056                 pr_err("%s: unknown data3 encoding %d\n", __func__, type);
1057                 return AARCH64_BREAK_FAULT;
1058         }
1059
1060         switch (variant) {
1061         case AARCH64_INSN_VARIANT_32BIT:
1062                 break;
1063         case AARCH64_INSN_VARIANT_64BIT:
1064                 insn |= AARCH64_INSN_SF_BIT;
1065                 break;
1066         default:
1067                 pr_err("%s: unknown variant encoding %d\n", __func__, variant);
1068                 return AARCH64_BREAK_FAULT;
1069         }
1070
1071         insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);
1072
1073         insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RA, insn, src);
1074
1075         insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
1076                                             reg1);
1077
1078         return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn,
1079                                             reg2);
1080 }
1081
1082 u32 aarch64_insn_gen_logical_shifted_reg(enum aarch64_insn_register dst,
1083                                          enum aarch64_insn_register src,
1084                                          enum aarch64_insn_register reg,
1085                                          int shift,
1086                                          enum aarch64_insn_variant variant,
1087                                          enum aarch64_insn_logic_type type)
1088 {
1089         u32 insn;
1090
1091         switch (type) {
1092         case AARCH64_INSN_LOGIC_AND:
1093                 insn = aarch64_insn_get_and_value();
1094                 break;
1095         case AARCH64_INSN_LOGIC_BIC:
1096                 insn = aarch64_insn_get_bic_value();
1097                 break;
1098         case AARCH64_INSN_LOGIC_ORR:
1099                 insn = aarch64_insn_get_orr_value();
1100                 break;
1101         case AARCH64_INSN_LOGIC_ORN:
1102                 insn = aarch64_insn_get_orn_value();
1103                 break;
1104         case AARCH64_INSN_LOGIC_EOR:
1105                 insn = aarch64_insn_get_eor_value();
1106                 break;
1107         case AARCH64_INSN_LOGIC_EON:
1108                 insn = aarch64_insn_get_eon_value();
1109                 break;
1110         case AARCH64_INSN_LOGIC_AND_SETFLAGS:
1111                 insn = aarch64_insn_get_ands_value();
1112                 break;
1113         case AARCH64_INSN_LOGIC_BIC_SETFLAGS:
1114                 insn = aarch64_insn_get_bics_value();
1115                 break;
1116         default:
1117                 pr_err("%s: unknown logical encoding %d\n", __func__, type);
1118                 return AARCH64_BREAK_FAULT;
1119         }
1120
1121         switch (variant) {
1122         case AARCH64_INSN_VARIANT_32BIT:
1123                 if (shift & ~(SZ_32 - 1)) {
1124                         pr_err("%s: invalid shift encoding %d\n", __func__,
1125                                shift);
1126                         return AARCH64_BREAK_FAULT;
1127                 }
1128                 break;
1129         case AARCH64_INSN_VARIANT_64BIT:
1130                 insn |= AARCH64_INSN_SF_BIT;
1131                 if (shift & ~(SZ_64 - 1)) {
1132                         pr_err("%s: invalid shift encoding %d\n", __func__,
1133                                shift);
1134                         return AARCH64_BREAK_FAULT;
1135                 }
1136                 break;
1137         default:
1138                 pr_err("%s: unknown variant encoding %d\n", __func__, variant);
1139                 return AARCH64_BREAK_FAULT;
1140         }
1141
1142
1143         insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);
1144
1145         insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);
1146
1147         insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, reg);
1148
1149         return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_6, insn, shift);
1150 }
1151
/*
 * Decode the imm field of a branch, and return the byte offset as a
 * signed value (so it can be used when computing a new branch
 * target).
 */
s32 aarch64_get_branch_offset(u32 insn)
{
	s32 imm;

	/*
	 * In each case below the raw field holds the offset in words.
	 * Shifting the field up so its top bit lands in bit 31, then
	 * arithmetic-shifting back down by two fewer bits, sign-extends
	 * the value and multiplies it by 4 (words -> bytes) in one go.
	 */
	if (aarch64_insn_is_b(insn) || aarch64_insn_is_bl(insn)) {
		/* B/BL: 26-bit immediate */
		imm = aarch64_insn_decode_immediate(AARCH64_INSN_IMM_26, insn);
		return (imm << 6) >> 4;
	}

	if (aarch64_insn_is_cbz(insn) || aarch64_insn_is_cbnz(insn) ||
	    aarch64_insn_is_bcond(insn)) {
		/* CBZ/CBNZ/B.cond: 19-bit immediate */
		imm = aarch64_insn_decode_immediate(AARCH64_INSN_IMM_19, insn);
		return (imm << 13) >> 11;
	}

	if (aarch64_insn_is_tbz(insn) || aarch64_insn_is_tbnz(insn)) {
		/* TBZ/TBNZ: 14-bit immediate */
		imm = aarch64_insn_decode_immediate(AARCH64_INSN_IMM_14, insn);
		return (imm << 18) >> 16;
	}

	/* Unhandled instruction */
	BUG();
}
1180
1181 /*
1182  * Encode the displacement of a branch in the imm field and return the
1183  * updated instruction.
1184  */
1185 u32 aarch64_set_branch_offset(u32 insn, s32 offset)
1186 {
1187         if (aarch64_insn_is_b(insn) || aarch64_insn_is_bl(insn))
1188                 return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_26, insn,
1189                                                      offset >> 2);
1190
1191         if (aarch64_insn_is_cbz(insn) || aarch64_insn_is_cbnz(insn) ||
1192             aarch64_insn_is_bcond(insn))
1193                 return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_19, insn,
1194                                                      offset >> 2);
1195
1196         if (aarch64_insn_is_tbz(insn) || aarch64_insn_is_tbnz(insn))
1197                 return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_14, insn,
1198                                                      offset >> 2);
1199
1200         /* Unhandled instruction */
1201         BUG();
1202 }
1203
/* Return the page offset encoded in an ADRP instruction, in bytes. */
s32 aarch64_insn_adrp_get_offset(u32 insn)
{
	BUG_ON(!aarch64_insn_is_adrp(insn));
	/* The ADR-style immediate is in units of 4K pages; scale to bytes. */
	return aarch64_insn_decode_immediate(AARCH64_INSN_IMM_ADR, insn) << 12;
}
1209
1210 u32 aarch64_insn_adrp_set_offset(u32 insn, s32 offset)
1211 {
1212         BUG_ON(!aarch64_insn_is_adrp(insn));
1213         return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_ADR, insn,
1214                                                 offset >> 12);
1215 }
1216
1217 /*
1218  * Extract the Op/CR data from a msr/mrs instruction.
1219  */
1220 u32 aarch64_insn_extract_system_reg(u32 insn)
1221 {
1222         return (insn & 0x1FFFE0) >> 5;
1223 }
1224
1225 bool aarch32_insn_is_wide(u32 insn)
1226 {
1227         return insn >= 0xe800;
1228 }
1229
1230 /*
1231  * Macros/defines for extracting register numbers from instruction.
1232  */
1233 u32 aarch32_insn_extract_reg_num(u32 insn, int offset)
1234 {
1235         return (insn & (0xf << offset)) >> offset;
1236 }
1237
1238 #define OPC2_MASK       0x7
1239 #define OPC2_OFFSET     5
1240 u32 aarch32_insn_mcr_extract_opc2(u32 insn)
1241 {
1242         return (insn & (OPC2_MASK << OPC2_OFFSET)) >> OPC2_OFFSET;
1243 }
1244
#define CRM_MASK	0xf
/* Extract the CRm field (low nibble, bits [3:0]) from an mcr instruction. */
u32 aarch32_insn_mcr_extract_crm(u32 insn)
{
	return insn & CRM_MASK;
}
1250
/* EQ: Z set */
static bool __kprobes __check_eq(unsigned long pstate)
{
	return (pstate & PSR_Z_BIT) != 0;
}
1255
/* NE: Z clear */
static bool __kprobes __check_ne(unsigned long pstate)
{
	return (pstate & PSR_Z_BIT) == 0;
}
1260
/* CS/HS: C set */
static bool __kprobes __check_cs(unsigned long pstate)
{
	return (pstate & PSR_C_BIT) != 0;
}
1265
/* CC/LO: C clear */
static bool __kprobes __check_cc(unsigned long pstate)
{
	return (pstate & PSR_C_BIT) == 0;
}
1270
/* MI: N set (negative) */
static bool __kprobes __check_mi(unsigned long pstate)
{
	return (pstate & PSR_N_BIT) != 0;
}
1275
/* PL: N clear (positive or zero) */
static bool __kprobes __check_pl(unsigned long pstate)
{
	return (pstate & PSR_N_BIT) == 0;
}
1280
/* VS: V set (overflow) */
static bool __kprobes __check_vs(unsigned long pstate)
{
	return (pstate & PSR_V_BIT) != 0;
}
1285
/* VC: V clear (no overflow) */
static bool __kprobes __check_vc(unsigned long pstate)
{
	return (pstate & PSR_V_BIT) == 0;
}
1290
/*
 * HI: C set and Z clear. Z sits one bit above C in pstate, so shifting
 * Z down onto C lets us clear C when Z is set, branchlessly.
 */
static bool __kprobes __check_hi(unsigned long pstate)
{
	pstate &= ~(pstate >> 1);	/* PSR_C_BIT &= ~PSR_Z_BIT */
	return (pstate & PSR_C_BIT) != 0;
}
1296
/* LS: C clear or Z set (inverse of HI, same branchless trick). */
static bool __kprobes __check_ls(unsigned long pstate)
{
	pstate &= ~(pstate >> 1);	/* PSR_C_BIT &= ~PSR_Z_BIT */
	return (pstate & PSR_C_BIT) == 0;
}
1302
/* GE: N == V. Shifting V up onto N lets a single XOR compare them. */
static bool __kprobes __check_ge(unsigned long pstate)
{
	pstate ^= (pstate << 3);	/* PSR_N_BIT ^= PSR_V_BIT */
	return (pstate & PSR_N_BIT) == 0;
}
1308
/* LT: N != V (inverse of GE). */
static bool __kprobes __check_lt(unsigned long pstate)
{
	pstate ^= (pstate << 3);	/* PSR_N_BIT ^= PSR_V_BIT */
	return (pstate & PSR_N_BIT) != 0;
}
1314
/* GT: Z clear and N == V, folded into a single test on the N position. */
static bool __kprobes __check_gt(unsigned long pstate)
{
	/*PSR_N_BIT ^= PSR_V_BIT */
	unsigned long temp = pstate ^ (pstate << 3);

	temp |= (pstate << 1);	/*PSR_N_BIT |= PSR_Z_BIT */
	return (temp & PSR_N_BIT) == 0;
}
1323
/* LE: Z set or N != V (inverse of GT). */
static bool __kprobes __check_le(unsigned long pstate)
{
	/*PSR_N_BIT ^= PSR_V_BIT */
	unsigned long temp = pstate ^ (pstate << 3);

	temp |= (pstate << 1);	/*PSR_N_BIT |= PSR_Z_BIT */
	return (temp & PSR_N_BIT) != 0;
}
1332
/* AL: always true (unconditional). */
static bool __kprobes __check_al(unsigned long pstate)
{
	return true;
}
1337
/*
 * Condition evaluators, indexed by the 4-bit AArch32 condition code.
 *
 * Note that the ARMv8 ARM calls condition code 0b1111 "nv", but states that
 * it behaves identically to 0b1110 ("al").
 */
pstate_check_t * const aarch32_opcode_cond_checks[16] = {
	__check_eq, __check_ne, __check_cs, __check_cc,
	__check_mi, __check_pl, __check_vs, __check_vc,
	__check_hi, __check_ls, __check_ge, __check_lt,
	__check_gt, __check_le, __check_al, __check_al
};