/*
 * BPF JIT compiler for ARM64
 *
 * Copyright (C) 2014-2015 Zi Shen Lim <zlim.lnx@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#define pr_fmt(fmt) "bpf_jit: " fmt

#include <linux/filter.h>
#include <linux/printk.h>
#include <linux/skbuff.h>
#include <linux/slab.h>

#include <asm/byteorder.h>
#include <asm/cacheflush.h>
#include <asm/debug-monitors.h>

/* A64 instruction encoders (A64_* macros) */
#include "bpf_jit.h"

int bpf_jit_enable __read_mostly;

#define TMP_REG_1 (MAX_BPF_REG + 0)
#define TMP_REG_2 (MAX_BPF_REG + 1)

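/*
 * Note on the mapping below: BPF R1-R5 (helper-call arguments) are kept
 * directly in the AAPCS64 argument registers x0-x4, so a BPF_CALL needs no
 * argument shuffling; BPF R6-R9 and the frame pointer live in callee-saved
 * registers, which in-kernel helpers must preserve. R0 maps to x7, so the
 * helper's result is copied back from x0 after each call.
 */
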
/* Map BPF registers to A64 registers */
static const int bpf2a64[] = {
	/* return value from in-kernel function, and exit value from eBPF */
	[BPF_REG_0] = A64_R(7),
	/* arguments from eBPF program to in-kernel function */
	[BPF_REG_1] = A64_R(0),
	[BPF_REG_2] = A64_R(1),
	[BPF_REG_3] = A64_R(2),
	[BPF_REG_4] = A64_R(3),
	[BPF_REG_5] = A64_R(4),
	/* callee saved registers that in-kernel function will preserve */
	[BPF_REG_6] = A64_R(19),
	[BPF_REG_7] = A64_R(20),
	[BPF_REG_8] = A64_R(21),
	[BPF_REG_9] = A64_R(22),
	/* read-only frame pointer to access stack */
	[BPF_REG_FP] = A64_FP,
	/* temporary register for internal BPF JIT */
	[TMP_REG_1] = A64_R(23),
	[TMP_REG_2] = A64_R(24),
};

struct jit_ctx {
	const struct bpf_prog *prog;
	int idx;		/* index of the next A64 instruction */
	int tmp_used;		/* set when TMP_REG_1/TMP_REG_2 are needed */
	int epilogue_offset;
	int *offset;		/* per-BPF-insn offsets into the image */
	u32 *image;		/* NULL during the sizing pass */
};

static inline void emit(const u32 insn, struct jit_ctx *ctx)
{
	if (ctx->image != NULL)
		ctx->image[ctx->idx] = cpu_to_le32(insn);

	ctx->idx++;
}

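/*
 * emit() serves both passes of the JIT: in the first pass ctx->image is
 * NULL, so only ctx->idx advances and we learn the final instruction count
 * and per-instruction offsets; in the second pass the same calls write the
 * little-endian opcodes into the allocated image.
 */
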
static inline void emit_a64_mov_i64(const int reg, const u64 val,
				    struct jit_ctx *ctx)
{
	u64 tmp = val;
	int shift = 0;

	emit(A64_MOVZ(1, reg, tmp & 0xffff, shift), ctx);
	tmp >>= 16;
	shift += 16;
	while (tmp) {
		if (tmp & 0xffff)
			emit(A64_MOVK(1, reg, tmp & 0xffff, shift), ctx);
		tmp >>= 16;
		shift += 16;
	}
}

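/*
 * For example, emit_a64_mov_i64(reg, 0x12345678, ctx) emits
 * MOVZ reg, #0x5678 followed by MOVK reg, #0x1234, lsl #16; 16-bit chunks
 * that are zero need no MOVK, so small constants stay short.
 */
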
static inline void emit_a64_mov_i(const int is64, const int reg,
				  const s32 val, struct jit_ctx *ctx)
{
	u16 hi = val >> 16;
	u16 lo = val & 0xffff;

	if (hi & 0x8000) {
		if (hi == 0xffff) {
			emit(A64_MOVN(is64, reg, (u16)~lo, 0), ctx);
		} else {
			emit(A64_MOVN(is64, reg, (u16)~hi, 16), ctx);
			emit(A64_MOVK(is64, reg, lo, 0), ctx);
		}
	} else {
		emit(A64_MOVZ(is64, reg, lo, 0), ctx);
		if (hi)
			emit(A64_MOVK(is64, reg, hi, 16), ctx);
	}
}

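/*
 * For example, a negative immediate such as -5 (hi == 0xffff, lo == 0xfffb)
 * becomes a single MOVN reg, #0x0004, since MOVN writes the bitwise NOT of
 * its operand; positive values take a MOVZ plus an optional MOVK.
 */
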
static inline int bpf2a64_offset(int bpf_to, int bpf_from,
				 const struct jit_ctx *ctx)
{
	int to = ctx->offset[bpf_to];
	/* -1 to account for the Branch instruction */
	int from = ctx->offset[bpf_from] - 1;

	return to - from;
}

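/*
 * Branch offsets are counted in A64 instructions (4 bytes each) and are
 * relative to the branch instruction itself. ctx->offset[i] holds the index
 * of the first A64 instruction emitted after BPF instruction i, so the
 * branch that ends instruction bpf_from sits one slot earlier, hence the -1.
 */
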
static void jit_fill_hole(void *area, unsigned int size)
{
	u32 *ptr;
	/* We are guaranteed to have aligned memory. */
	for (ptr = area; size >= sizeof(u32); size -= sizeof(u32))
		*ptr++ = cpu_to_le32(AARCH64_BREAK_FAULT);
}

static inline int epilogue_offset(const struct jit_ctx *ctx)
{
	int to = ctx->epilogue_offset;
	int from = ctx->idx;

	return to - from;
}

/* Stack must be multiples of 16B */
#define STACK_ALIGN(sz) (((sz) + 15) & ~15)

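/*
 * AArch64 requires SP to stay 16-byte aligned. With MAX_BPF_STACK == 512,
 * the prologue reserves STACK_ALIGN(512 + 4) == 528 bytes: the BPF stack
 * plus a 4-byte scratch buffer for skb_copy_bits(), rounded up.
 */
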
static void build_prologue(struct jit_ctx *ctx)
{
	const u8 r6 = bpf2a64[BPF_REG_6];
	const u8 r7 = bpf2a64[BPF_REG_7];
	const u8 r8 = bpf2a64[BPF_REG_8];
	const u8 r9 = bpf2a64[BPF_REG_9];
	const u8 fp = bpf2a64[BPF_REG_FP];
	const u8 ra = bpf2a64[BPF_REG_A];
	const u8 rx = bpf2a64[BPF_REG_X];
	const u8 tmp1 = bpf2a64[TMP_REG_1];
	const u8 tmp2 = bpf2a64[TMP_REG_2];
	int stack_size = MAX_BPF_STACK;

	stack_size += 4; /* extra for skb_copy_bits buffer */
	stack_size = STACK_ALIGN(stack_size);

	/* Save callee-saved registers */
	emit(A64_PUSH(r6, r7, A64_SP), ctx);
	emit(A64_PUSH(r8, r9, A64_SP), ctx);
	if (ctx->tmp_used)
		emit(A64_PUSH(tmp1, tmp2, A64_SP), ctx);

	/* Set up BPF stack */
	emit(A64_SUB_I(1, A64_SP, A64_SP, stack_size), ctx);

	/* Set up frame pointer */
	emit(A64_MOV(1, fp, A64_SP), ctx);

	/* Clear registers A and X */
	emit_a64_mov_i64(ra, 0, ctx);
	emit_a64_mov_i64(rx, 0, ctx);
}

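/*
 * Frame laid out by build_prologue(), from high to low addresses:
 *
 *	saved r6, r7
 *	saved r8, r9
 *	saved tmp1, tmp2		(only when ctx->tmp_used is set)
 *	alignment padding
 *	4-byte skb_copy_bits buffer	<- fp + MAX_BPF_STACK
 *	BPF program stack		<- fp == SP (BPF_REG_FP)
 */
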
static void build_epilogue(struct jit_ctx *ctx)
{
	const u8 r0 = bpf2a64[BPF_REG_0];
	const u8 r6 = bpf2a64[BPF_REG_6];
	const u8 r7 = bpf2a64[BPF_REG_7];
	const u8 r8 = bpf2a64[BPF_REG_8];
	const u8 r9 = bpf2a64[BPF_REG_9];
	const u8 fp = bpf2a64[BPF_REG_FP];
	const u8 tmp1 = bpf2a64[TMP_REG_1];
	const u8 tmp2 = bpf2a64[TMP_REG_2];
	int stack_size = MAX_BPF_STACK;

	stack_size += 4; /* extra for skb_copy_bits buffer */
	stack_size = STACK_ALIGN(stack_size);

	/* We're done with BPF stack */
	emit(A64_ADD_I(1, A64_SP, A64_SP, stack_size), ctx);

	/* Restore callee-saved registers */
	if (ctx->tmp_used)
		emit(A64_POP(tmp1, tmp2, A64_SP), ctx);
	emit(A64_POP(r8, r9, A64_SP), ctx);
	emit(A64_POP(r6, r7, A64_SP), ctx);

	/* Restore frame pointer */
	emit(A64_MOV(1, fp, A64_SP), ctx);

	/* Set return value */
	emit(A64_MOV(1, A64_R(0), r0), ctx);

	emit(A64_RET(A64_LR), ctx);
}

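/*
 * The epilogue is emitted exactly once, at ctx->epilogue_offset; early exits
 * (the divide-by-zero check, failed LD_ABS/LD_IND loads and BPF_EXIT in the
 * middle of a program) branch here via epilogue_offset() rather than
 * emitting their own restore sequence.
 */
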
/* JITs an eBPF instruction.
 * Returns:
 * 0  - successfully JITed an 8-byte eBPF instruction.
 * >0 - successfully JITed a 16-byte eBPF instruction.
 * <0 - failed to JIT.
 */
static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
{
	const u8 code = insn->code;
	const u8 dst = bpf2a64[insn->dst_reg];
	const u8 src = bpf2a64[insn->src_reg];
	const u8 tmp = bpf2a64[TMP_REG_1];
	const u8 tmp2 = bpf2a64[TMP_REG_2];
	const s16 off = insn->off;
	const s32 imm = insn->imm;
	const int i = insn - ctx->prog->insnsi;
	const bool is64 = BPF_CLASS(code) == BPF_ALU64;
	u8 jmp_cond;
	s32 jmp_offset;
#define check_imm(bits, imm) do {				\
	if ((((imm) > 0) && ((imm) >> (bits))) ||		\
	    (((imm) < 0) && (~(imm) >> (bits)))) {		\
		pr_info("[%2d] imm=%d(0x%x) out of range\n",	\
			i, imm, imm);				\
		return -EINVAL;					\
	}							\
} while (0)
#define check_imm19(imm) check_imm(19, imm)
#define check_imm26(imm) check_imm(26, imm)

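/*
 * A64 conditional branches (B.cond, CBZ/CBNZ) encode a signed 19-bit
 * instruction offset (+/-1 MiB), unconditional B a 26-bit one (+/-128 MiB);
 * check_imm19/check_imm26 reject jump targets that cannot be encoded.
 */
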
	switch (code) {
	/* dst = src */
	case BPF_ALU | BPF_MOV | BPF_X:
	case BPF_ALU64 | BPF_MOV | BPF_X:
		emit(A64_MOV(is64, dst, src), ctx);
		break;
	/* dst = dst OP src */
	case BPF_ALU | BPF_ADD | BPF_X:
	case BPF_ALU64 | BPF_ADD | BPF_X:
		emit(A64_ADD(is64, dst, dst, src), ctx);
		break;
	case BPF_ALU | BPF_SUB | BPF_X:
	case BPF_ALU64 | BPF_SUB | BPF_X:
		emit(A64_SUB(is64, dst, dst, src), ctx);
		break;
	case BPF_ALU | BPF_AND | BPF_X:
	case BPF_ALU64 | BPF_AND | BPF_X:
		emit(A64_AND(is64, dst, dst, src), ctx);
		break;
	case BPF_ALU | BPF_OR | BPF_X:
	case BPF_ALU64 | BPF_OR | BPF_X:
		emit(A64_ORR(is64, dst, dst, src), ctx);
		break;
	case BPF_ALU | BPF_XOR | BPF_X:
	case BPF_ALU64 | BPF_XOR | BPF_X:
		emit(A64_EOR(is64, dst, dst, src), ctx);
		break;
	case BPF_ALU | BPF_MUL | BPF_X:
	case BPF_ALU64 | BPF_MUL | BPF_X:
		emit(A64_MUL(is64, dst, dst, src), ctx);
		break;
	case BPF_ALU | BPF_DIV | BPF_X:
	case BPF_ALU64 | BPF_DIV | BPF_X:
	case BPF_ALU | BPF_MOD | BPF_X:
	case BPF_ALU64 | BPF_MOD | BPF_X:
	{
		const u8 r0 = bpf2a64[BPF_REG_0];

		/* if (src == 0) return 0 */
		jmp_offset = 3; /* skip ahead to else path */
		check_imm19(jmp_offset);
		emit(A64_CBNZ(is64, src, jmp_offset), ctx);
		emit(A64_MOVZ(1, r0, 0, 0), ctx);
		jmp_offset = epilogue_offset(ctx);
		check_imm26(jmp_offset);
		emit(A64_B(jmp_offset), ctx);
		/* else */
		switch (BPF_OP(code)) {
		case BPF_DIV:
			emit(A64_UDIV(is64, dst, dst, src), ctx);
			break;
		case BPF_MOD:
			ctx->tmp_used = 1;
			emit(A64_UDIV(is64, tmp, dst, src), ctx);
			emit(A64_MUL(is64, tmp, tmp, src), ctx);
			emit(A64_SUB(is64, dst, dst, tmp), ctx);
			break;
		}
		break;
	}
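	/*
	 * The divide-by-zero guard above emits CBNZ src, +3; MOVZ r0, #0;
	 * B <epilogue>. The offset of 3 is counted from the CBNZ itself, so
	 * a non-zero divisor skips the two "return 0" instructions and falls
	 * through to the UDIV (or the UDIV/MUL/SUB sequence used for MOD).
	 */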
	case BPF_ALU | BPF_LSH | BPF_X:
	case BPF_ALU64 | BPF_LSH | BPF_X:
		emit(A64_LSLV(is64, dst, dst, src), ctx);
		break;
	case BPF_ALU | BPF_RSH | BPF_X:
	case BPF_ALU64 | BPF_RSH | BPF_X:
		emit(A64_LSRV(is64, dst, dst, src), ctx);
		break;
	case BPF_ALU | BPF_ARSH | BPF_X:
	case BPF_ALU64 | BPF_ARSH | BPF_X:
		emit(A64_ASRV(is64, dst, dst, src), ctx);
		break;
	/* dst = -dst */
	case BPF_ALU | BPF_NEG:
	case BPF_ALU64 | BPF_NEG:
		emit(A64_NEG(is64, dst, dst), ctx);
		break;
	/* dst = BSWAP##imm(dst) */
	case BPF_ALU | BPF_END | BPF_FROM_LE:
	case BPF_ALU | BPF_END | BPF_FROM_BE:
#ifdef CONFIG_CPU_BIG_ENDIAN
		if (BPF_SRC(code) == BPF_FROM_BE)
			goto emit_bswap_uxt;
#else /* !CONFIG_CPU_BIG_ENDIAN */
		if (BPF_SRC(code) == BPF_FROM_LE)
			goto emit_bswap_uxt;
#endif
		switch (imm) {
		case 16:
			emit(A64_REV16(is64, dst, dst), ctx);
			/* zero-extend 16 bits into 64 bits */
			emit(A64_UXTH(is64, dst, dst), ctx);
			break;
		case 32:
			emit(A64_REV32(is64, dst, dst), ctx);
			/* upper 32 bits already cleared */
			break;
		case 64:
			emit(A64_REV64(dst, dst), ctx);
			break;
		}
		break;
emit_bswap_uxt:
		switch (imm) {
		case 16:
			/* zero-extend 16 bits into 64 bits */
			emit(A64_UXTH(is64, dst, dst), ctx);
			break;
		case 32:
			/* zero-extend 32 bits into 64 bits */
			emit(A64_UXTW(is64, dst, dst), ctx);
			break;
		case 64:
			/* nop */
			break;
		}
		break;
	/* dst = imm */
	case BPF_ALU | BPF_MOV | BPF_K:
	case BPF_ALU64 | BPF_MOV | BPF_K:
		emit_a64_mov_i(is64, dst, imm, ctx);
		break;
	/* dst = dst OP imm */
	case BPF_ALU | BPF_ADD | BPF_K:
	case BPF_ALU64 | BPF_ADD | BPF_K:
		ctx->tmp_used = 1;
		emit_a64_mov_i(is64, tmp, imm, ctx);
		emit(A64_ADD(is64, dst, dst, tmp), ctx);
		break;
	case BPF_ALU | BPF_SUB | BPF_K:
	case BPF_ALU64 | BPF_SUB | BPF_K:
		ctx->tmp_used = 1;
		emit_a64_mov_i(is64, tmp, imm, ctx);
		emit(A64_SUB(is64, dst, dst, tmp), ctx);
		break;
	case BPF_ALU | BPF_AND | BPF_K:
	case BPF_ALU64 | BPF_AND | BPF_K:
		ctx->tmp_used = 1;
		emit_a64_mov_i(is64, tmp, imm, ctx);
		emit(A64_AND(is64, dst, dst, tmp), ctx);
		break;
	case BPF_ALU | BPF_OR | BPF_K:
	case BPF_ALU64 | BPF_OR | BPF_K:
		ctx->tmp_used = 1;
		emit_a64_mov_i(is64, tmp, imm, ctx);
		emit(A64_ORR(is64, dst, dst, tmp), ctx);
		break;
	case BPF_ALU | BPF_XOR | BPF_K:
	case BPF_ALU64 | BPF_XOR | BPF_K:
		ctx->tmp_used = 1;
		emit_a64_mov_i(is64, tmp, imm, ctx);
		emit(A64_EOR(is64, dst, dst, tmp), ctx);
		break;
	case BPF_ALU | BPF_MUL | BPF_K:
	case BPF_ALU64 | BPF_MUL | BPF_K:
		ctx->tmp_used = 1;
		emit_a64_mov_i(is64, tmp, imm, ctx);
		emit(A64_MUL(is64, dst, dst, tmp), ctx);
		break;
	case BPF_ALU | BPF_DIV | BPF_K:
	case BPF_ALU64 | BPF_DIV | BPF_K:
		ctx->tmp_used = 1;
		emit_a64_mov_i(is64, tmp, imm, ctx);
		emit(A64_UDIV(is64, dst, dst, tmp), ctx);
		break;
	case BPF_ALU | BPF_MOD | BPF_K:
	case BPF_ALU64 | BPF_MOD | BPF_K:
		ctx->tmp_used = 1;
		emit_a64_mov_i(is64, tmp2, imm, ctx);
		emit(A64_UDIV(is64, tmp, dst, tmp2), ctx);
		emit(A64_MUL(is64, tmp, tmp, tmp2), ctx);
		emit(A64_SUB(is64, dst, dst, tmp), ctx);
		break;
	case BPF_ALU | BPF_LSH | BPF_K:
	case BPF_ALU64 | BPF_LSH | BPF_K:
		emit(A64_LSL(is64, dst, dst, imm), ctx);
		break;
	case BPF_ALU | BPF_RSH | BPF_K:
	case BPF_ALU64 | BPF_RSH | BPF_K:
		emit(A64_LSR(is64, dst, dst, imm), ctx);
		break;
	case BPF_ALU | BPF_ARSH | BPF_K:
	case BPF_ALU64 | BPF_ARSH | BPF_K:
		emit(A64_ASR(is64, dst, dst, imm), ctx);
		break;
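	/*
	 * The non-shift immediate forms above first materialize imm into a
	 * temporary with emit_a64_mov_i(): A64 arithmetic/logical immediates
	 * cannot represent every 32-bit constant, so the JIT always goes
	 * through a register operand. Shift amounts, by contrast, are
	 * encoded directly in LSL/LSR/ASR.
	 */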
	/* JUMP off */
	case BPF_JMP | BPF_JA:
		jmp_offset = bpf2a64_offset(i + off, i, ctx);
		check_imm26(jmp_offset);
		emit(A64_B(jmp_offset), ctx);
		break;
	/* IF (dst COND src) JUMP off */
	case BPF_JMP | BPF_JEQ | BPF_X:
	case BPF_JMP | BPF_JGT | BPF_X:
	case BPF_JMP | BPF_JGE | BPF_X:
	case BPF_JMP | BPF_JNE | BPF_X:
	case BPF_JMP | BPF_JSGT | BPF_X:
	case BPF_JMP | BPF_JSGE | BPF_X:
		emit(A64_CMP(1, dst, src), ctx);
emit_cond_jmp:
		jmp_offset = bpf2a64_offset(i + off, i, ctx);
		check_imm19(jmp_offset);
		switch (BPF_OP(code)) {
		case BPF_JEQ:
			jmp_cond = A64_COND_EQ;
			break;
		case BPF_JGT:
			jmp_cond = A64_COND_HI;
			break;
		case BPF_JGE:
			jmp_cond = A64_COND_CS;
			break;
		case BPF_JNE:
		case BPF_JSET:
			jmp_cond = A64_COND_NE;
			break;
		case BPF_JSGT:
			jmp_cond = A64_COND_GT;
			break;
		case BPF_JSGE:
			jmp_cond = A64_COND_GE;
			break;
		default:
			return -EFAULT;
		}
		emit(A64_B_(jmp_cond, jmp_offset), ctx);
		break;
	case BPF_JMP | BPF_JSET | BPF_X:
		emit(A64_TST(1, dst, src), ctx);
		goto emit_cond_jmp;
	/* IF (dst COND imm) JUMP off */
	case BPF_JMP | BPF_JEQ | BPF_K:
	case BPF_JMP | BPF_JGT | BPF_K:
	case BPF_JMP | BPF_JGE | BPF_K:
	case BPF_JMP | BPF_JNE | BPF_K:
	case BPF_JMP | BPF_JSGT | BPF_K:
	case BPF_JMP | BPF_JSGE | BPF_K:
		ctx->tmp_used = 1;
		emit_a64_mov_i(1, tmp, imm, ctx);
		emit(A64_CMP(1, dst, tmp), ctx);
		goto emit_cond_jmp;
	case BPF_JMP | BPF_JSET | BPF_K:
		ctx->tmp_used = 1;
		emit_a64_mov_i(1, tmp, imm, ctx);
		emit(A64_TST(1, dst, tmp), ctx);
		goto emit_cond_jmp;
	/* function call */
	case BPF_JMP | BPF_CALL:
	{
		const u8 r0 = bpf2a64[BPF_REG_0];
		const u64 func = (u64)__bpf_call_base + imm;

		ctx->tmp_used = 1;
		emit_a64_mov_i64(tmp, func, ctx);
		emit(A64_PUSH(A64_FP, A64_LR, A64_SP), ctx);
		emit(A64_MOV(1, A64_FP, A64_SP), ctx);
		emit(A64_BLR(tmp), ctx);
		emit(A64_MOV(1, r0, A64_R(0)), ctx);
		emit(A64_POP(A64_FP, A64_LR, A64_SP), ctx);
		break;
	}
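	/*
	 * For BPF_CALL the helper address is computed as __bpf_call_base +
	 * imm, loaded into a temporary and called with BLR. FP and LR are
	 * spilled around the call and the A64 return value in x0 is copied
	 * into the register backing BPF R0.
	 */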
	/* function return */
	case BPF_JMP | BPF_EXIT:
		/* Optimization: when last instruction is EXIT,
		   simply fallthrough to epilogue. */
		if (i == ctx->prog->len - 1)
			break;
		jmp_offset = epilogue_offset(ctx);
		check_imm26(jmp_offset);
		emit(A64_B(jmp_offset), ctx);
		break;
	/* dst = imm64 */
	case BPF_LD | BPF_IMM | BPF_DW:
	{
		const struct bpf_insn insn1 = insn[1];
		u64 imm64;

		if (insn1.code != 0 || insn1.src_reg != 0 ||
		    insn1.dst_reg != 0 || insn1.off != 0) {
			/* Note: verifier in BPF core must catch invalid
			 * instructions.
			 */
			pr_err_once("Invalid BPF_LD_IMM64 instruction\n");
			return -EINVAL;
		}

		imm64 = (u64)insn1.imm << 32 | (u32)imm;
		emit_a64_mov_i64(dst, imm64, ctx);

		return 1;
	}
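	/*
	 * BPF_LD | BPF_IMM | BPF_DW occupies two 8-byte BPF instructions
	 * (the second slot carries the upper 32 bits of the constant), which
	 * is why this case returns 1 and build_body() then skips the
	 * following slot.
	 */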
	/* LDX: dst = *(size *)(src + off) */
	case BPF_LDX | BPF_MEM | BPF_W:
	case BPF_LDX | BPF_MEM | BPF_H:
	case BPF_LDX | BPF_MEM | BPF_B:
	case BPF_LDX | BPF_MEM | BPF_DW:
		ctx->tmp_used = 1;
		emit_a64_mov_i(1, tmp, off, ctx);
		switch (BPF_SIZE(code)) {
		case BPF_W:
			emit(A64_LDR32(dst, src, tmp), ctx);
			break;
		case BPF_H:
			emit(A64_LDRH(dst, src, tmp), ctx);
			break;
		case BPF_B:
			emit(A64_LDRB(dst, src, tmp), ctx);
			break;
		case BPF_DW:
			emit(A64_LDR64(dst, src, tmp), ctx);
			break;
		}
		break;
	/* ST: *(size *)(dst + off) = imm */
	case BPF_ST | BPF_MEM | BPF_W:
	case BPF_ST | BPF_MEM | BPF_H:
	case BPF_ST | BPF_MEM | BPF_B:
	case BPF_ST | BPF_MEM | BPF_DW:
		goto notyet;
	/* STX: *(size *)(dst + off) = src */
	case BPF_STX | BPF_MEM | BPF_W:
	case BPF_STX | BPF_MEM | BPF_H:
	case BPF_STX | BPF_MEM | BPF_B:
	case BPF_STX | BPF_MEM | BPF_DW:
		ctx->tmp_used = 1;
		emit_a64_mov_i(1, tmp, off, ctx);
		switch (BPF_SIZE(code)) {
		case BPF_W:
			emit(A64_STR32(src, dst, tmp), ctx);
			break;
		case BPF_H:
			emit(A64_STRH(src, dst, tmp), ctx);
			break;
		case BPF_B:
			emit(A64_STRB(src, dst, tmp), ctx);
			break;
		case BPF_DW:
			emit(A64_STR64(src, dst, tmp), ctx);
			break;
		}
		break;
	/* STX XADD: lock *(u32 *)(dst + off) += src */
	case BPF_STX | BPF_XADD | BPF_W:
	/* STX XADD: lock *(u64 *)(dst + off) += src */
	case BPF_STX | BPF_XADD | BPF_DW:
		goto notyet;
	/* R0 = ntohx(*(size *)(((struct sk_buff *)R6)->data + imm)) */
	case BPF_LD | BPF_ABS | BPF_W:
	case BPF_LD | BPF_ABS | BPF_H:
	case BPF_LD | BPF_ABS | BPF_B:
	/* R0 = ntohx(*(size *)(((struct sk_buff *)R6)->data + src + imm)) */
	case BPF_LD | BPF_IND | BPF_W:
	case BPF_LD | BPF_IND | BPF_H:
	case BPF_LD | BPF_IND | BPF_B:
	{
		const u8 r0 = bpf2a64[BPF_REG_0]; /* r0 = return value */
		const u8 r6 = bpf2a64[BPF_REG_6]; /* r6 = pointer to sk_buff */
		const u8 fp = bpf2a64[BPF_REG_FP];
		const u8 r1 = bpf2a64[BPF_REG_1]; /* r1: struct sk_buff *skb */
		const u8 r2 = bpf2a64[BPF_REG_2]; /* r2: int k */
		const u8 r3 = bpf2a64[BPF_REG_3]; /* r3: unsigned int size */
		const u8 r4 = bpf2a64[BPF_REG_4]; /* r4: void *buffer */
		const u8 r5 = bpf2a64[BPF_REG_5]; /* r5: void *(*func)(...) */
		int size;
		emit(A64_MOV(1, r1, r6), ctx);
		emit_a64_mov_i(0, r2, imm, ctx);
		if (BPF_MODE(code) == BPF_IND)
			emit(A64_ADD(0, r2, r2, src), ctx);
		switch (BPF_SIZE(code)) {
		case BPF_W:
			size = 4;
			break;
		case BPF_H:
			size = 2;
			break;
		case BPF_B:
			size = 1;
			break;
		default:
			return -EINVAL;
		}
		emit_a64_mov_i64(r3, size, ctx);
		emit(A64_ADD_I(1, r4, fp, MAX_BPF_STACK), ctx);
		emit_a64_mov_i64(r5, (unsigned long)bpf_load_pointer, ctx);
		emit(A64_PUSH(A64_FP, A64_LR, A64_SP), ctx);
		emit(A64_MOV(1, A64_FP, A64_SP), ctx);
		emit(A64_BLR(r5), ctx);
		emit(A64_MOV(1, r0, A64_R(0)), ctx);
		emit(A64_POP(A64_FP, A64_LR, A64_SP), ctx);
		jmp_offset = epilogue_offset(ctx);
		check_imm19(jmp_offset);
		emit(A64_CBZ(1, r0, jmp_offset), ctx);
		emit(A64_MOV(1, r5, r0), ctx);
		switch (BPF_SIZE(code)) {
		case BPF_W:
			emit(A64_LDR32(r0, r5, A64_ZR), ctx);
#ifndef CONFIG_CPU_BIG_ENDIAN
			emit(A64_REV32(0, r0, r0), ctx);
#endif
			break;
		case BPF_H:
			emit(A64_LDRH(r0, r5, A64_ZR), ctx);
#ifndef CONFIG_CPU_BIG_ENDIAN
			emit(A64_REV16(0, r0, r0), ctx);
#endif
			break;
		case BPF_B:
			emit(A64_LDRB(r0, r5, A64_ZR), ctx);
			break;
		}
		break;
	}
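	/*
	 * The sequence above follows the calling convention of
	 * bpf_load_pointer(): r1 = skb, r2 = offset, r3 = length,
	 * r4 = per-frame scratch buffer. A NULL return branches to the
	 * epilogue (failed load); otherwise the bytes are loaded from the
	 * returned pointer and, on little-endian kernels, byte-reversed so
	 * that R0 holds the value in host order, matching the ntohx()
	 * semantics noted above.
	 */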
notyet:
		pr_info_once("*** NOT YET: opcode %02x ***\n", code);
		return -EFAULT;

	default:
		pr_err_once("unknown opcode %02x\n", code);
		return -EINVAL;
	}

	return 0;
}

static int build_body(struct jit_ctx *ctx)
{
	const struct bpf_prog *prog = ctx->prog;
	int i;

	for (i = 0; i < prog->len; i++) {
		const struct bpf_insn *insn = &prog->insnsi[i];
		int ret;

		ret = build_insn(insn, ctx);

		if (ctx->image == NULL)
			ctx->offset[i] = ctx->idx;
		if (ret > 0) {
			/* BPF_LD | BPF_IMM | BPF_DW: skip the second slot */
			i++;
			continue;
		}
		if (ret)
			return ret;
	}

	return 0;
}

static inline void bpf_flush_icache(void *start, void *end)
{
	flush_icache_range((unsigned long)start, (unsigned long)end);
}

void bpf_jit_compile(struct bpf_prog *prog)
{
	/* Nothing to do here. We support Internal BPF. */
}

void bpf_int_jit_compile(struct bpf_prog *prog)
{
	struct bpf_binary_header *header;
	struct jit_ctx ctx;
	int image_size;
	u8 *image_ptr;

	if (!bpf_jit_enable)
		return;

	if (!prog || !prog->len)
		return;

	memset(&ctx, 0, sizeof(ctx));
	ctx.prog = prog;

	ctx.offset = kcalloc(prog->len, sizeof(int), GFP_KERNEL);
	if (ctx.offset == NULL)
		return;

	/* 1. Initial fake pass to compute ctx->idx, filling in ctx->offset
	 * and ctx->tmp_used along the way.
	 */
	if (build_body(&ctx))
		goto out;

	build_prologue(&ctx);

	ctx.epilogue_offset = ctx.idx;
	build_epilogue(&ctx);

	/* Now we know the actual image size. */
	image_size = sizeof(u32) * ctx.idx;
	header = bpf_jit_binary_alloc(image_size, &image_ptr,
				      sizeof(u32), jit_fill_hole);
	if (header == NULL)
		goto out;

	/* 2. Now, the actual pass. */

	ctx.image = (u32 *)image_ptr;
	ctx.idx = 0;

	build_prologue(&ctx);

	if (build_body(&ctx)) {
		bpf_jit_binary_free(header);
		goto out;
	}

	build_epilogue(&ctx);

	/* And we're done. */
	if (bpf_jit_enable > 1)
		bpf_jit_dump(prog->len, image_size, 2, ctx.image);

	bpf_flush_icache(ctx.image, ctx.image + ctx.idx);

	set_memory_ro((unsigned long)header, header->pages);
	prog->bpf_func = (void *)ctx.image;
	prog->jited = true;
out:
	kfree(ctx.offset);
}

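/*
 * The image is flushed from the I-cache and its pages are marked read-only
 * once JITing succeeds; bpf_jit_free() below makes them writable again
 * before handing the binary header back to the allocator.
 */
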
void bpf_jit_free(struct bpf_prog *prog)
{
	unsigned long addr = (unsigned long)prog->bpf_func & PAGE_MASK;
	struct bpf_binary_header *header = (void *)addr;

	if (!prog->jited)
		goto free_filter;

	set_memory_rw(addr, header->pages);
	bpf_jit_binary_free(header);

free_filter:
	bpf_prog_unlock_free(prog);
}