/*
 * BPF JIT compiler for ARM64
 *
 * Copyright (C) 2014-2016 Zi Shen Lim <zlim.lnx@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#define pr_fmt(fmt) "bpf_jit: " fmt

#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/printk.h>
#include <linux/skbuff.h>
#include <linux/slab.h>

#include <asm/byteorder.h>
#include <asm/cacheflush.h>
#include <asm/debug-monitors.h>
#include <asm/set_memory.h>

#include "bpf_jit.h"
int bpf_jit_enable __read_mostly;

#define TMP_REG_1 (MAX_BPF_JIT_REG + 0)
#define TMP_REG_2 (MAX_BPF_JIT_REG + 1)
#define TCALL_CNT (MAX_BPF_JIT_REG + 2)
/* Map BPF registers to A64 registers */
static const int bpf2a64[] = {
	/* return value from in-kernel function, and exit value from eBPF */
	[BPF_REG_0] = A64_R(7),
	/* arguments from eBPF program to in-kernel function */
	[BPF_REG_1] = A64_R(0),
	[BPF_REG_2] = A64_R(1),
	[BPF_REG_3] = A64_R(2),
	[BPF_REG_4] = A64_R(3),
	[BPF_REG_5] = A64_R(4),
	/* callee saved registers that in-kernel function will preserve */
	[BPF_REG_6] = A64_R(19),
	[BPF_REG_7] = A64_R(20),
	[BPF_REG_8] = A64_R(21),
	[BPF_REG_9] = A64_R(22),
	/* read-only frame pointer to access stack */
	[BPF_REG_FP] = A64_R(25),
	/* temporary registers for internal BPF JIT */
	[TMP_REG_1] = A64_R(10),
	[TMP_REG_2] = A64_R(11),
	/* tail_call_cnt */
	[TCALL_CNT] = A64_R(26),
	/* temporary register for blinding constants */
	[BPF_REG_AX] = A64_R(9),
};
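/*
 * Note: BPF_REG_1..BPF_REG_5 land directly in the AAPCS64 argument
 * registers x0-x4, so calls into in-kernel helpers need no argument
 * shuffling. BPF_REG_0 lives in x7; the epilogue copies it into x0 to
 * form the native return value.
 */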
struct jit_ctx {
	const struct bpf_prog *prog;
	int idx;
	int epilogue_offset;
	int *offset;
	u32 *image;
};
static inline void emit(const u32 insn, struct jit_ctx *ctx)
{
	if (ctx->image != NULL)
		ctx->image[ctx->idx] = cpu_to_le32(insn);

	ctx->idx++;
}
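/*
 * During the initial sizing pass ctx->image is NULL, so emit() only
 * advances ctx->idx; the same code paths are replayed later to write
 * the real instructions into the allocated image.
 */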
static inline void emit_a64_mov_i64(const int reg, const u64 val,
				    struct jit_ctx *ctx)
{
	u64 tmp = val;
	int shift = 0;

	emit(A64_MOVZ(1, reg, tmp & 0xffff, shift), ctx);
	tmp >>= 16;
	shift += 16;
	while (tmp) {
		if (tmp & 0xffff)
			emit(A64_MOVK(1, reg, tmp & 0xffff, shift), ctx);
		tmp >>= 16;
		shift += 16;
	}
}
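/*
 * A sketch of what the loop above produces: loading
 * val = 0x00000002deadbeef emits
 *	movz reg, #0xbeef
 *	movk reg, #0xdead, lsl #16
 *	movk reg, #0x2,    lsl #32
 * All-zero 16-bit chunks are simply skipped.
 */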
static inline void emit_a64_mov_i(const int is64, const int reg,
				  const s32 val, struct jit_ctx *ctx)
{
	u16 hi = val >> 16;
	u16 lo = val & 0xffff;

	if (hi & 0x8000) {
		if (hi == 0xffff) {
			emit(A64_MOVN(is64, reg, (u16)~lo, 0), ctx);
		} else {
			emit(A64_MOVN(is64, reg, (u16)~hi, 16), ctx);
			emit(A64_MOVK(is64, reg, lo, 0), ctx);
		}
	} else {
		emit(A64_MOVZ(is64, reg, lo, 0), ctx);
		if (hi)
			emit(A64_MOVK(is64, reg, hi, 16), ctx);
	}
}
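/*
 * A 32-bit immediate thus never takes more than two instructions: the
 * MOVN-based forms build values with a sign-extended high half (e.g.
 * -1 becomes a single "movn reg, #0"), while MOVZ/MOVK handles the
 * rest.
 */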
static inline int bpf2a64_offset(int bpf_to, int bpf_from,
				 const struct jit_ctx *ctx)
{
	int to = ctx->offset[bpf_to];
	/* -1 to account for the Branch instruction */
	int from = ctx->offset[bpf_from] - 1;

	return to - from;
}
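/*
 * ctx->offset[i] holds the A64 index just past the code emitted for
 * BPF insn i, so the -1 above points "from" back at the branch itself,
 * which is what A64 PC-relative offsets are measured from.
 */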
static void jit_fill_hole(void *area, unsigned int size)
{
	u32 *ptr;
	/* We are guaranteed to have aligned memory. */
	for (ptr = area; size >= sizeof(u32); size -= sizeof(u32))
		*ptr++ = cpu_to_le32(AARCH64_BREAK_FAULT);
}
static inline int epilogue_offset(const struct jit_ctx *ctx)
{
	int to = ctx->epilogue_offset;
	int from = ctx->idx;

	return to - from;
}
/* Stack must be a multiple of 16 bytes */
#define STACK_ALIGN(sz) (((sz) + 15) & ~15)

#define _STACK_SIZE \
	(MAX_BPF_STACK \
	 + 4 /* extra for skb_copy_bits buffer */)

#define STACK_SIZE STACK_ALIGN(_STACK_SIZE)
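/*
 * For instance, assuming MAX_BPF_STACK is 512, _STACK_SIZE is 516 and
 * STACK_ALIGN rounds it up to a 528-byte reservation.
 */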
#define PROLOGUE_OFFSET 8
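/*
 * PROLOGUE_OFFSET is the prologue length in instructions; tail calls
 * jump to bpf_func + sizeof(u32) * PROLOGUE_OFFSET to skip the target
 * program's prologue, and build_prologue() verifies the count below.
 */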
static int build_prologue(struct jit_ctx *ctx)
{
	const u8 r6 = bpf2a64[BPF_REG_6];
	const u8 r7 = bpf2a64[BPF_REG_7];
	const u8 r8 = bpf2a64[BPF_REG_8];
	const u8 r9 = bpf2a64[BPF_REG_9];
	const u8 fp = bpf2a64[BPF_REG_FP];
	const u8 tcc = bpf2a64[TCALL_CNT];
	const int idx0 = ctx->idx;
	int cur_offset;

	/*
	 * BPF prog stack layout
	 *
	 *                         high
	 * original A64_SP =>   0:+-----+ BPF prologue
	 *                        |FP/LR|
	 * current A64_FP =>  -16:+-----+
	 *                        | ... | callee saved registers
	 * BPF fp register => -64:+-----+ <= (BPF_FP)
	 *                        |     |
	 *                        | ... | BPF prog stack
	 *                        |     |
	 *                        +-----+ <= (BPF_FP - MAX_BPF_STACK)
	 *                        |RSVD | JIT scratchpad
	 * current A64_SP =>      +-----+ <= (BPF_FP - STACK_SIZE)
	 *                        |     |
	 *                        | ... | Function call stack
	 *                        |     |
	 *                        +-----+
	 *                          low
	 */

	/* Save FP and LR registers to stay aligned with ARM64 AAPCS */
	emit(A64_PUSH(A64_FP, A64_LR, A64_SP), ctx);
	emit(A64_MOV(1, A64_FP, A64_SP), ctx);

	/* Save callee-saved registers */
	emit(A64_PUSH(r6, r7, A64_SP), ctx);
	emit(A64_PUSH(r8, r9, A64_SP), ctx);
	emit(A64_PUSH(fp, tcc, A64_SP), ctx);

	/* Set up BPF prog stack base register */
	emit(A64_MOV(1, fp, A64_SP), ctx);

	/* Initialize tail_call_cnt */
	emit(A64_MOVZ(1, tcc, 0, 0), ctx);

	/* Set up function call stack */
	emit(A64_SUB_I(1, A64_SP, A64_SP, STACK_SIZE), ctx);

	cur_offset = ctx->idx - idx0;
	if (cur_offset != PROLOGUE_OFFSET) {
		pr_err_once("PROLOGUE_OFFSET = %d, expected %d!\n",
			    cur_offset, PROLOGUE_OFFSET);
		return -1;
	}
	return 0;
}
/* initialized on the first pass of build_body() */
static int out_offset = -1;
static int emit_bpf_tail_call(struct jit_ctx *ctx)
{
	/* bpf_tail_call(void *prog_ctx, struct bpf_array *array, u64 index) */
	const u8 r2 = bpf2a64[BPF_REG_2];
	const u8 r3 = bpf2a64[BPF_REG_3];

	const u8 tmp = bpf2a64[TMP_REG_1];
	const u8 prg = bpf2a64[TMP_REG_2];
	const u8 tcc = bpf2a64[TCALL_CNT];
	const int idx0 = ctx->idx;
#define cur_offset (ctx->idx - idx0)
#define jmp_offset (out_offset - (cur_offset))
	size_t off;

	/* if (index >= array->map.max_entries)
	 *     goto out;
	 */
	off = offsetof(struct bpf_array, map.max_entries);
	emit_a64_mov_i64(tmp, off, ctx);
	emit(A64_LDR32(tmp, r2, tmp), ctx);
	emit(A64_CMP(0, r3, tmp), ctx);
	emit(A64_B_(A64_COND_GE, jmp_offset), ctx);

	/* if (tail_call_cnt > MAX_TAIL_CALL_CNT)
	 *     goto out;
	 * tail_call_cnt++;
	 */
	emit_a64_mov_i64(tmp, MAX_TAIL_CALL_CNT, ctx);
	emit(A64_CMP(1, tcc, tmp), ctx);
	emit(A64_B_(A64_COND_GT, jmp_offset), ctx);
	emit(A64_ADD_I(1, tcc, tcc, 1), ctx);

	/* prog = array->ptrs[index];
	 * if (prog == NULL)
	 *     goto out;
	 */
	off = offsetof(struct bpf_array, ptrs);
	emit_a64_mov_i64(tmp, off, ctx);
	emit(A64_ADD(1, tmp, r2, tmp), ctx);
	emit(A64_LSL(1, prg, r3, 3), ctx);
	emit(A64_LDR64(prg, tmp, prg), ctx);
	emit(A64_CBZ(1, prg, jmp_offset), ctx);

	/* goto *(prog->bpf_func + prologue_size); */
	off = offsetof(struct bpf_prog, bpf_func);
	emit_a64_mov_i64(tmp, off, ctx);
	emit(A64_LDR64(tmp, prg, tmp), ctx);
	emit(A64_ADD_I(1, tmp, tmp, sizeof(u32) * PROLOGUE_OFFSET), ctx);
	emit(A64_BR(tmp), ctx);

	/* out: */
	if (out_offset == -1)
		out_offset = cur_offset;
	if (cur_offset != out_offset) {
		pr_err_once("tail_call out_offset = %d, expected %d!\n",
			    cur_offset, out_offset);
		return -1;
	}
	return 0;
#undef cur_offset
#undef jmp_offset
}
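/*
 * out_offset is resolved lazily: the first pass records where the
 * "out:" label lands, and every later pass (and every other tail-call
 * site) must produce a sequence of identical length, or the forward
 * jmp_offset values computed above would be wrong.
 */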
static void build_epilogue(struct jit_ctx *ctx)
{
	const u8 r0 = bpf2a64[BPF_REG_0];
	const u8 r6 = bpf2a64[BPF_REG_6];
	const u8 r7 = bpf2a64[BPF_REG_7];
	const u8 r8 = bpf2a64[BPF_REG_8];
	const u8 r9 = bpf2a64[BPF_REG_9];
	const u8 fp = bpf2a64[BPF_REG_FP];

	/* We're done with BPF stack */
	emit(A64_ADD_I(1, A64_SP, A64_SP, STACK_SIZE), ctx);

	/* Restore fp (x25) and x26 */
	emit(A64_POP(fp, A64_R(26), A64_SP), ctx);

	/* Restore callee-saved registers */
	emit(A64_POP(r8, r9, A64_SP), ctx);
	emit(A64_POP(r6, r7, A64_SP), ctx);

	/* Restore FP/LR registers */
	emit(A64_POP(A64_FP, A64_LR, A64_SP), ctx);

	/* Set return value */
	emit(A64_MOV(1, A64_R(0), r0), ctx);

	emit(A64_RET(A64_LR), ctx);
}
/* JITs an eBPF instruction.
 * Returns:
 * 0  - successfully JITed an 8-byte eBPF instruction.
 * >0 - successfully JITed a 16-byte eBPF instruction.
 * <0 - failed to JIT.
 */
static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
{
	const u8 code = insn->code;
	const u8 dst = bpf2a64[insn->dst_reg];
	const u8 src = bpf2a64[insn->src_reg];
	const u8 tmp = bpf2a64[TMP_REG_1];
	const u8 tmp2 = bpf2a64[TMP_REG_2];
	const s16 off = insn->off;
	const s32 imm = insn->imm;
	const int i = insn - ctx->prog->insnsi;
	const bool is64 = BPF_CLASS(code) == BPF_ALU64;
	const bool isdw = BPF_SIZE(code) == BPF_DW;
	u8 jmp_cond;
	s32 jmp_offset;

#define check_imm(bits, imm) do {				\
	if ((((imm) > 0) && ((imm) >> (bits))) ||		\
	    (((imm) < 0) && (~(imm) >> (bits)))) {		\
		pr_info("[%2d] imm=%d(0x%x) out of range\n",	\
			i, imm, imm);				\
		return -EINVAL;					\
	}							\
} while (0)
#define check_imm19(imm) check_imm(19, imm)
#define check_imm26(imm) check_imm(26, imm)
	switch (code) {
	/* dst = src */
	case BPF_ALU | BPF_MOV | BPF_X:
	case BPF_ALU64 | BPF_MOV | BPF_X:
		emit(A64_MOV(is64, dst, src), ctx);
		break;
	/* dst = dst OP src */
	case BPF_ALU | BPF_ADD | BPF_X:
	case BPF_ALU64 | BPF_ADD | BPF_X:
		emit(A64_ADD(is64, dst, dst, src), ctx);
		break;
	case BPF_ALU | BPF_SUB | BPF_X:
	case BPF_ALU64 | BPF_SUB | BPF_X:
		emit(A64_SUB(is64, dst, dst, src), ctx);
		break;
	case BPF_ALU | BPF_AND | BPF_X:
	case BPF_ALU64 | BPF_AND | BPF_X:
		emit(A64_AND(is64, dst, dst, src), ctx);
		break;
	case BPF_ALU | BPF_OR | BPF_X:
	case BPF_ALU64 | BPF_OR | BPF_X:
		emit(A64_ORR(is64, dst, dst, src), ctx);
		break;
	case BPF_ALU | BPF_XOR | BPF_X:
	case BPF_ALU64 | BPF_XOR | BPF_X:
		emit(A64_EOR(is64, dst, dst, src), ctx);
		break;
	case BPF_ALU | BPF_MUL | BPF_X:
	case BPF_ALU64 | BPF_MUL | BPF_X:
		emit(A64_MUL(is64, dst, dst, src), ctx);
		break;
	case BPF_ALU | BPF_DIV | BPF_X:
	case BPF_ALU64 | BPF_DIV | BPF_X:
	case BPF_ALU | BPF_MOD | BPF_X:
	case BPF_ALU64 | BPF_MOD | BPF_X:
	{
		const u8 r0 = bpf2a64[BPF_REG_0];

		/* if (src == 0) return 0 */
		jmp_offset = 3; /* skip ahead to else path */
		check_imm19(jmp_offset);
		emit(A64_CBNZ(is64, src, jmp_offset), ctx);
		emit(A64_MOVZ(1, r0, 0, 0), ctx);
		jmp_offset = epilogue_offset(ctx);
		check_imm26(jmp_offset);
		emit(A64_B(jmp_offset), ctx);
		/* else */
		switch (BPF_OP(code)) {
		case BPF_DIV:
			emit(A64_UDIV(is64, dst, dst, src), ctx);
			break;
		case BPF_MOD:
			emit(A64_UDIV(is64, tmp, dst, src), ctx);
			emit(A64_MUL(is64, tmp, tmp, src), ctx);
			emit(A64_SUB(is64, dst, dst, tmp), ctx);
			break;
		}
		break;
	}
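	/*
	 * A64 has no integer remainder instruction, so MOD is open-coded
	 * as dst - (dst / src) * src; the UDIV/MUL/SUB triple above
	 * computes exactly that.
	 */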
	case BPF_ALU | BPF_LSH | BPF_X:
	case BPF_ALU64 | BPF_LSH | BPF_X:
		emit(A64_LSLV(is64, dst, dst, src), ctx);
		break;
	case BPF_ALU | BPF_RSH | BPF_X:
	case BPF_ALU64 | BPF_RSH | BPF_X:
		emit(A64_LSRV(is64, dst, dst, src), ctx);
		break;
	case BPF_ALU | BPF_ARSH | BPF_X:
	case BPF_ALU64 | BPF_ARSH | BPF_X:
		emit(A64_ASRV(is64, dst, dst, src), ctx);
		break;
	/* dst = -dst */
	case BPF_ALU | BPF_NEG:
	case BPF_ALU64 | BPF_NEG:
		emit(A64_NEG(is64, dst, dst), ctx);
		break;
	/* dst = BSWAP##imm(dst) */
	case BPF_ALU | BPF_END | BPF_FROM_LE:
	case BPF_ALU | BPF_END | BPF_FROM_BE:
#ifdef CONFIG_CPU_BIG_ENDIAN
		if (BPF_SRC(code) == BPF_FROM_BE)
			goto emit_bswap_uxt;
#else /* !CONFIG_CPU_BIG_ENDIAN */
		if (BPF_SRC(code) == BPF_FROM_LE)
			goto emit_bswap_uxt;
#endif
		switch (imm) {
		case 16:
			emit(A64_REV16(is64, dst, dst), ctx);
			/* zero-extend 16 bits into 64 bits */
			emit(A64_UXTH(is64, dst, dst), ctx);
			break;
		case 32:
			emit(A64_REV32(is64, dst, dst), ctx);
			/* upper 32 bits already cleared */
			break;
		case 64:
			emit(A64_REV64(dst, dst), ctx);
			break;
		}
		break;
emit_bswap_uxt:
		switch (imm) {
		case 16:
			/* zero-extend 16 bits into 64 bits */
			emit(A64_UXTH(is64, dst, dst), ctx);
			break;
		case 32:
			/* zero-extend 32 bits into 64 bits */
			emit(A64_UXTW(is64, dst, dst), ctx);
			break;
		case 64:
			/* nop */
			break;
		}
		break;
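	/*
	 * Even when the byte order already matches (the emit_bswap_uxt
	 * path), the 16- and 32-bit forms must still zero the upper bits
	 * of dst, so only the byte swap itself is skipped, not the
	 * zero-extension.
	 */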
	/* dst = imm */
	case BPF_ALU | BPF_MOV | BPF_K:
	case BPF_ALU64 | BPF_MOV | BPF_K:
		emit_a64_mov_i(is64, dst, imm, ctx);
		break;
	/* dst = dst OP imm */
	case BPF_ALU | BPF_ADD | BPF_K:
	case BPF_ALU64 | BPF_ADD | BPF_K:
		emit_a64_mov_i(is64, tmp, imm, ctx);
		emit(A64_ADD(is64, dst, dst, tmp), ctx);
		break;
	case BPF_ALU | BPF_SUB | BPF_K:
	case BPF_ALU64 | BPF_SUB | BPF_K:
		emit_a64_mov_i(is64, tmp, imm, ctx);
		emit(A64_SUB(is64, dst, dst, tmp), ctx);
		break;
	case BPF_ALU | BPF_AND | BPF_K:
	case BPF_ALU64 | BPF_AND | BPF_K:
		emit_a64_mov_i(is64, tmp, imm, ctx);
		emit(A64_AND(is64, dst, dst, tmp), ctx);
		break;
	case BPF_ALU | BPF_OR | BPF_K:
	case BPF_ALU64 | BPF_OR | BPF_K:
		emit_a64_mov_i(is64, tmp, imm, ctx);
		emit(A64_ORR(is64, dst, dst, tmp), ctx);
		break;
	case BPF_ALU | BPF_XOR | BPF_K:
	case BPF_ALU64 | BPF_XOR | BPF_K:
		emit_a64_mov_i(is64, tmp, imm, ctx);
		emit(A64_EOR(is64, dst, dst, tmp), ctx);
		break;
	case BPF_ALU | BPF_MUL | BPF_K:
	case BPF_ALU64 | BPF_MUL | BPF_K:
		emit_a64_mov_i(is64, tmp, imm, ctx);
		emit(A64_MUL(is64, dst, dst, tmp), ctx);
		break;
	case BPF_ALU | BPF_DIV | BPF_K:
	case BPF_ALU64 | BPF_DIV | BPF_K:
		emit_a64_mov_i(is64, tmp, imm, ctx);
		emit(A64_UDIV(is64, dst, dst, tmp), ctx);
		break;
	case BPF_ALU | BPF_MOD | BPF_K:
	case BPF_ALU64 | BPF_MOD | BPF_K:
		emit_a64_mov_i(is64, tmp2, imm, ctx);
		emit(A64_UDIV(is64, tmp, dst, tmp2), ctx);
		emit(A64_MUL(is64, tmp, tmp, tmp2), ctx);
		emit(A64_SUB(is64, dst, dst, tmp), ctx);
		break;
	case BPF_ALU | BPF_LSH | BPF_K:
	case BPF_ALU64 | BPF_LSH | BPF_K:
		emit(A64_LSL(is64, dst, dst, imm), ctx);
		break;
	case BPF_ALU | BPF_RSH | BPF_K:
	case BPF_ALU64 | BPF_RSH | BPF_K:
		emit(A64_LSR(is64, dst, dst, imm), ctx);
		break;
	case BPF_ALU | BPF_ARSH | BPF_K:
	case BPF_ALU64 | BPF_ARSH | BPF_K:
		emit(A64_ASR(is64, dst, dst, imm), ctx);
		break;
	/* JUMP off */
	case BPF_JMP | BPF_JA:
		jmp_offset = bpf2a64_offset(i + off, i, ctx);
		check_imm26(jmp_offset);
		emit(A64_B(jmp_offset), ctx);
		break;
	/* IF (dst COND src) JUMP off */
	case BPF_JMP | BPF_JEQ | BPF_X:
	case BPF_JMP | BPF_JGT | BPF_X:
	case BPF_JMP | BPF_JGE | BPF_X:
	case BPF_JMP | BPF_JNE | BPF_X:
	case BPF_JMP | BPF_JSGT | BPF_X:
	case BPF_JMP | BPF_JSGE | BPF_X:
		emit(A64_CMP(1, dst, src), ctx);
emit_cond_jmp:
		jmp_offset = bpf2a64_offset(i + off, i, ctx);
		check_imm19(jmp_offset);
		switch (BPF_OP(code)) {
		case BPF_JEQ:
			jmp_cond = A64_COND_EQ;
			break;
		case BPF_JGT:
			jmp_cond = A64_COND_HI;
			break;
		case BPF_JGE:
			jmp_cond = A64_COND_CS;
			break;
		case BPF_JSET:
		case BPF_JNE:
			jmp_cond = A64_COND_NE;
			break;
		case BPF_JSGT:
			jmp_cond = A64_COND_GT;
			break;
		case BPF_JSGE:
			jmp_cond = A64_COND_GE;
			break;
		default:
			return -EFAULT;
		}
		emit(A64_B_(jmp_cond, jmp_offset), ctx);
		break;
	case BPF_JMP | BPF_JSET | BPF_X:
		emit(A64_TST(1, dst, src), ctx);
		goto emit_cond_jmp;
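	/*
	 * Note the condition-code mapping: the unsigned BPF compares use
	 * the A64 unsigned conditions (JGT -> HI, JGE -> CS) while the
	 * signed variants use GT/GE, and JSET shares the JNE path since
	 * TST sets the Z flag just as CMP does for equality.
	 */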
	/* IF (dst COND imm) JUMP off */
	case BPF_JMP | BPF_JEQ | BPF_K:
	case BPF_JMP | BPF_JGT | BPF_K:
	case BPF_JMP | BPF_JGE | BPF_K:
	case BPF_JMP | BPF_JNE | BPF_K:
	case BPF_JMP | BPF_JSGT | BPF_K:
	case BPF_JMP | BPF_JSGE | BPF_K:
		emit_a64_mov_i(1, tmp, imm, ctx);
		emit(A64_CMP(1, dst, tmp), ctx);
		goto emit_cond_jmp;
	case BPF_JMP | BPF_JSET | BPF_K:
		emit_a64_mov_i(1, tmp, imm, ctx);
		emit(A64_TST(1, dst, tmp), ctx);
		goto emit_cond_jmp;
	/* function call */
	case BPF_JMP | BPF_CALL:
	{
		const u8 r0 = bpf2a64[BPF_REG_0];
		const u64 func = (u64)__bpf_call_base + imm;

		emit_a64_mov_i64(tmp, func, ctx);
		emit(A64_BLR(tmp), ctx);
		emit(A64_MOV(1, r0, A64_R(0)), ctx);
		break;
	}
	/* tail call */
	case BPF_JMP | BPF_CALL | BPF_X:
		if (emit_bpf_tail_call(ctx))
			return -EFAULT;
		break;
	/* function return */
	case BPF_JMP | BPF_EXIT:
		/* Optimization: when last instruction is EXIT,
		 * simply fall through to epilogue.
		 */
		if (i == ctx->prog->len - 1)
			break;
		jmp_offset = epilogue_offset(ctx);
		check_imm26(jmp_offset);
		emit(A64_B(jmp_offset), ctx);
		break;
	/* dst = imm64 */
	case BPF_LD | BPF_IMM | BPF_DW:
	{
		const struct bpf_insn insn1 = insn[1];
		u64 imm64;

		imm64 = (u64)insn1.imm << 32 | (u32)imm;
		emit_a64_mov_i64(dst, imm64, ctx);

		return 1;
	}
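	/*
	 * BPF_LD | BPF_IMM | BPF_DW is the only 16-byte eBPF instruction:
	 * the 64-bit immediate is split across two 8-byte slots, so
	 * returning 1 tells build_body() to skip the second slot.
	 */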
	/* LDX: dst = *(size *)(src + off) */
	case BPF_LDX | BPF_MEM | BPF_W:
	case BPF_LDX | BPF_MEM | BPF_H:
	case BPF_LDX | BPF_MEM | BPF_B:
	case BPF_LDX | BPF_MEM | BPF_DW:
		emit_a64_mov_i(1, tmp, off, ctx);
		switch (BPF_SIZE(code)) {
		case BPF_W:
			emit(A64_LDR32(dst, src, tmp), ctx);
			break;
		case BPF_H:
			emit(A64_LDRH(dst, src, tmp), ctx);
			break;
		case BPF_B:
			emit(A64_LDRB(dst, src, tmp), ctx);
			break;
		case BPF_DW:
			emit(A64_LDR64(dst, src, tmp), ctx);
			break;
		}
		break;
	/* ST: *(size *)(dst + off) = imm */
	case BPF_ST | BPF_MEM | BPF_W:
	case BPF_ST | BPF_MEM | BPF_H:
	case BPF_ST | BPF_MEM | BPF_B:
	case BPF_ST | BPF_MEM | BPF_DW:
		/* Load imm to a register then store it */
		emit_a64_mov_i(1, tmp2, off, ctx);
		emit_a64_mov_i(1, tmp, imm, ctx);
		switch (BPF_SIZE(code)) {
		case BPF_W:
			emit(A64_STR32(tmp, dst, tmp2), ctx);
			break;
		case BPF_H:
			emit(A64_STRH(tmp, dst, tmp2), ctx);
			break;
		case BPF_B:
			emit(A64_STRB(tmp, dst, tmp2), ctx);
			break;
		case BPF_DW:
			emit(A64_STR64(tmp, dst, tmp2), ctx);
			break;
		}
		break;
	/* STX: *(size *)(dst + off) = src */
	case BPF_STX | BPF_MEM | BPF_W:
	case BPF_STX | BPF_MEM | BPF_H:
	case BPF_STX | BPF_MEM | BPF_B:
	case BPF_STX | BPF_MEM | BPF_DW:
		emit_a64_mov_i(1, tmp, off, ctx);
		switch (BPF_SIZE(code)) {
		case BPF_W:
			emit(A64_STR32(src, dst, tmp), ctx);
			break;
		case BPF_H:
			emit(A64_STRH(src, dst, tmp), ctx);
			break;
		case BPF_B:
			emit(A64_STRB(src, dst, tmp), ctx);
			break;
		case BPF_DW:
			emit(A64_STR64(src, dst, tmp), ctx);
			break;
		}
		break;
	/* STX XADD: lock *(u32 *)(dst + off) += src */
	case BPF_STX | BPF_XADD | BPF_W:
	/* STX XADD: lock *(u64 *)(dst + off) += src */
	case BPF_STX | BPF_XADD | BPF_DW:
		emit_a64_mov_i(1, tmp, off, ctx);
		emit(A64_ADD(1, tmp, tmp, dst), ctx);
		emit(A64_PRFM(tmp, PST, L1, STRM), ctx);
		emit(A64_LDXR(isdw, tmp2, tmp), ctx);
		emit(A64_ADD(isdw, tmp2, tmp2, src), ctx);
		emit(A64_STXR(isdw, tmp2, tmp, tmp2), ctx);
		jmp_offset = -3;
		check_imm19(jmp_offset);
		emit(A64_CBNZ(0, tmp2, jmp_offset), ctx);
		break;
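	/*
	 * This is the classic load-exclusive/store-exclusive retry loop:
	 * STXR writes a status word (0 on success) into tmp2, and the
	 * CBNZ branches back -3 instructions (to the LDXR) until the
	 * update lands atomically.
	 */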
	/* R0 = ntohx(*(size *)(((struct sk_buff *)R6)->data + imm)) */
	case BPF_LD | BPF_ABS | BPF_W:
	case BPF_LD | BPF_ABS | BPF_H:
	case BPF_LD | BPF_ABS | BPF_B:
	/* R0 = ntohx(*(size *)(((struct sk_buff *)R6)->data + src + imm)) */
	case BPF_LD | BPF_IND | BPF_W:
	case BPF_LD | BPF_IND | BPF_H:
	case BPF_LD | BPF_IND | BPF_B:
	{
		const u8 r0 = bpf2a64[BPF_REG_0]; /* r0 = return value */
		const u8 r6 = bpf2a64[BPF_REG_6]; /* r6 = pointer to sk_buff */
		const u8 fp = bpf2a64[BPF_REG_FP];
		const u8 r1 = bpf2a64[BPF_REG_1]; /* r1: struct sk_buff *skb */
		const u8 r2 = bpf2a64[BPF_REG_2]; /* r2: int k */
		const u8 r3 = bpf2a64[BPF_REG_3]; /* r3: unsigned int size */
		const u8 r4 = bpf2a64[BPF_REG_4]; /* r4: void *buffer */
		const u8 r5 = bpf2a64[BPF_REG_5]; /* r5: void *(*func)(...) */
		int size;

		emit(A64_MOV(1, r1, r6), ctx);
		emit_a64_mov_i(0, r2, imm, ctx);
		if (BPF_MODE(code) == BPF_IND)
			emit(A64_ADD(0, r2, r2, src), ctx);
		switch (BPF_SIZE(code)) {
		case BPF_W:
			size = 4;
			break;
		case BPF_H:
			size = 2;
			break;
		case BPF_B:
			size = 1;
			break;
		default:
			return -EINVAL;
		}
		emit_a64_mov_i64(r3, size, ctx);
		emit(A64_SUB_I(1, r4, fp, STACK_SIZE), ctx);
		emit_a64_mov_i64(r5, (unsigned long)bpf_load_pointer, ctx);
		emit(A64_BLR(r5), ctx);
		emit(A64_MOV(1, r0, A64_R(0)), ctx);

		jmp_offset = epilogue_offset(ctx);
		check_imm19(jmp_offset);
		emit(A64_CBZ(1, r0, jmp_offset), ctx);
		emit(A64_MOV(1, r5, r0), ctx);
		switch (BPF_SIZE(code)) {
		case BPF_W:
			emit(A64_LDR32(r0, r5, A64_ZR), ctx);
#ifndef CONFIG_CPU_BIG_ENDIAN
			emit(A64_REV32(0, r0, r0), ctx);
#endif
			break;
		case BPF_H:
			emit(A64_LDRH(r0, r5, A64_ZR), ctx);
#ifndef CONFIG_CPU_BIG_ENDIAN
			emit(A64_REV16(0, r0, r0), ctx);
#endif
			break;
		case BPF_B:
			emit(A64_LDRB(r0, r5, A64_ZR), ctx);
			break;
		}
		break;
	}
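	/*
	 * bpf_load_pointer() returns a pointer to the requested bytes
	 * (either into the skb or into the on-stack scratch buffer at
	 * FP - STACK_SIZE), or NULL when out of bounds, which the CBZ
	 * above turns into an immediate return of 0. Packet data is in
	 * network (big-endian) order, hence the REV fixups on
	 * little-endian kernels.
	 */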
	default:
		pr_err_once("unknown opcode %02x\n", code);
		return -EINVAL;
	}

	return 0;
}
static int build_body(struct jit_ctx *ctx)
{
	const struct bpf_prog *prog = ctx->prog;
	int i;

	for (i = 0; i < prog->len; i++) {
		const struct bpf_insn *insn = &prog->insnsi[i];
		int ret;

		ret = build_insn(insn, ctx);
		if (ret > 0) {
			i++;
			if (ctx->image == NULL)
				ctx->offset[i] = ctx->idx;
			continue;
		}
		if (ctx->image == NULL)
			ctx->offset[i] = ctx->idx;
		if (ret)
			return ret;
	}

	return 0;
}
static int validate_code(struct jit_ctx *ctx)
{
	int i;

	for (i = 0; i < ctx->idx; i++) {
		u32 a64_insn = le32_to_cpu(ctx->image[i]);

		if (a64_insn == AARCH64_BREAK_FAULT)
			return -1;
	}

	return 0;
}
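/*
 * The A64 instruction encoders return AARCH64_BREAK_FAULT when asked
 * for an impossible encoding, and jit_fill_hole() pre-fills the image
 * with the same value, so a single scan catches both encoder failures
 * and untouched holes in the image.
 */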
static inline void bpf_flush_icache(void *start, void *end)
{
	flush_icache_range((unsigned long)start, (unsigned long)end);
}
struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
{
	struct bpf_prog *tmp, *orig_prog = prog;
	struct bpf_binary_header *header;
	bool tmp_blinded = false;
	struct jit_ctx ctx;
	int image_size;
	u8 *image_ptr;

	if (!bpf_jit_enable)
		return orig_prog;

	tmp = bpf_jit_blind_constants(prog);
	/* If blinding was requested and we failed during blinding,
	 * we must fall back to the interpreter.
	 */
	if (IS_ERR(tmp))
		return orig_prog;
	if (tmp != prog) {
		tmp_blinded = true;
		prog = tmp;
	}

	memset(&ctx, 0, sizeof(ctx));
	ctx.prog = prog;

	ctx.offset = kcalloc(prog->len, sizeof(int), GFP_KERNEL);
	if (ctx.offset == NULL) {
		prog = orig_prog;
		goto out;
	}

	/* 1. Initial fake pass to compute ctx->idx (i.e. the image size)
	 *    and to fill in ctx->offset.
	 */
	if (build_body(&ctx)) {
		prog = orig_prog;
		goto out_off;
	}

	if (build_prologue(&ctx)) {
		prog = orig_prog;
		goto out_off;
	}

	ctx.epilogue_offset = ctx.idx;
	build_epilogue(&ctx);

	/* Now we know the actual image size. */
	image_size = sizeof(u32) * ctx.idx;
	header = bpf_jit_binary_alloc(image_size, &image_ptr,
				      sizeof(u32), jit_fill_hole);
	if (header == NULL) {
		prog = orig_prog;
		goto out_off;
	}

	/* 2. Now, the actual pass. */
	ctx.image = (u32 *)image_ptr;
	ctx.idx = 0;

	build_prologue(&ctx);

	if (build_body(&ctx)) {
		bpf_jit_binary_free(header);
		prog = orig_prog;
		goto out_off;
	}

	build_epilogue(&ctx);

	/* 3. Extra pass to validate JITed code. */
	if (validate_code(&ctx)) {
		bpf_jit_binary_free(header);
		prog = orig_prog;
		goto out_off;
	}

	/* And we're done. */
	if (bpf_jit_enable > 1)
		bpf_jit_dump(prog->len, image_size, 2, ctx.image);

	bpf_flush_icache(header, ctx.image + ctx.idx);

	bpf_jit_binary_lock_ro(header);
	prog->bpf_func = (void *)ctx.image;
	prog->jited = 1;

out_off:
	kfree(ctx.offset);
out:
	if (tmp_blinded)
		bpf_jit_prog_release_other(prog, prog == orig_prog ?
					   tmp : orig_prog);
	return prog;
}