/*
 * Just-In-Time compiler for BPF filters on MIPS
 *
 * Copyright (c) 2014 Imagination Technologies Ltd.
 * Author: Markos Chandras <markos.chandras@imgtec.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; version 2 of the License.
 */

#include <linux/bitops.h>
#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/filter.h>
#include <linux/if_vlan.h>
#include <linux/moduleloader.h>
#include <linux/netdevice.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <asm/bitops.h>
#include <asm/cacheflush.h>
#include <asm/cpu-features.h>
#include <asm/uasm.h>

#include "bpf_jit.h"
/*
 * ABI:
 * r_skb_hl	SKB header length
 * r_data	SKB data pointer
 * r_off	offset register
 * r_A		BPF register A
 * r_X		BPF register X
 * r_skb	*skb
 * r_M		*scratch memory
 * r_skb_len	SKB length
 *
 * On entry (*bpf_func)(*skb, *filter)
 * a0 = MIPS_R_A0 = skb;
 * a1 = MIPS_R_A1 = filter;
 *
 * Stack:
 * ...
 * saved reg 0	<-- r_sp
 *
 * Packet layout:
 * <--------------------- len ------------------------>
 * <--skb-len(r_skb_hl)--><----- skb->data_len ------->
 * ----------------------------------------------------
 * |                   skb->data                       |
 * ----------------------------------------------------
 */
#define ptr typeof(unsigned long)

#define SCRATCH_OFF(k)		(4 * (k))

/* JIT flags */
#define SEEN_CALL		(1 << BPF_MEMWORDS)
#define SEEN_SREG_SFT		(BPF_MEMWORDS + 1)
#define SEEN_SREG_BASE		(1 << SEEN_SREG_SFT)
#define SEEN_SREG(x)		(SEEN_SREG_BASE << (x))
#define SEEN_OFF		SEEN_SREG(2)
#define SEEN_A			SEEN_SREG(3)
#define SEEN_X			SEEN_SREG(4)
#define SEEN_SKB		SEEN_SREG(5)
#define SEEN_MEM		SEEN_SREG(6)
/* SEEN_SKB_DATA also implies skb_hl and skb_len */
#define SEEN_SKB_DATA		(SEEN_SREG(7) | SEEN_SREG(1) | SEEN_SREG(0))

/* Arguments used by JIT */
#define ARGS_USED_BY_JIT	2 /* only applicable to 64-bit */

#define SBIT(x)			(1 << (x)) /* Signed version of BIT() */
/**
 * struct jit_ctx - JIT context
 * @skf:		The sk_filter
 * @prologue_bytes:	Number of bytes for prologue
 * @idx:		Instruction index
 * @flags:		JIT flags
 * @offsets:		Instruction offsets
 * @target:		Memory location for the compiled filter
 */
struct jit_ctx {
	const struct bpf_prog *skf;
	unsigned int prologue_bytes;
	u32 idx;
	u32 flags;
	u32 *offsets;
	u32 *target;
};
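/*
 * Editor's sketch (not part of the original file): the flag bits from
 * SEEN_SREG_SFT upwards map one-to-one onto the callee-saved s-registers,
 * so a single shift turns ctx->flags into the bitmap that
 * save_bpf_jit_regs()/restore_bpf_jit_regs() below iterate over.
 */
static inline u32 seen_sreg_bitmap(const struct jit_ctx *ctx)
{
	return ctx->flags >> SEEN_SREG_SFT;
}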
static inline int optimize_div(u32 *k)
{
	/* power of 2 divides can be implemented with right shift */
	if (!(*k & (*k - 1))) {
		*k = ilog2(*k);
		return 1;
	}

	return 0;
}
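/*
 * Worked example (editor's note): for "A /= 8" the test 8 & 7 == 0
 * succeeds, k is rewritten to ilog2(8) == 3, and the BPF_DIV case below
 * emits a single "srl r_A, r_A, 3" instead of a divu/mflo pair. A
 * constant-zero divisor never reaches this helper in practice because
 * the classic BPF checker rejects division by a zero constant.
 */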
static inline void emit_jit_reg_move(ptr dst, ptr src, struct jit_ctx *ctx);

/* Simply emit the instruction if the JIT memory space has been allocated */
#define emit_instr(ctx, func, ...)			\
do {							\
	if ((ctx)->target != NULL) {			\
		u32 *p = &(ctx)->target[ctx->idx];	\
		uasm_i_##func(&p, ##__VA_ARGS__);	\
	}						\
	(ctx)->idx++;					\
} while (0)

/*
 * Similar to emit_instr but it must be used when we need to emit
 * 32-bit or 64-bit instructions
 */
#define emit_long_instr(ctx, func, ...)			\
do {							\
	if ((ctx)->target != NULL) {			\
		u32 *p = &(ctx)->target[ctx->idx];	\
		UASM_i_##func(&p, ##__VA_ARGS__);	\
	}						\
	(ctx)->idx++;					\
} while (0)
/* Determine if immediate is within the 16-bit signed range */
static inline bool is_range16(s32 imm)
{
	return !(imm >= SBIT(15) || imm < -SBIT(15));
}
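/*
 * Examples (editor's note): is_range16(32767) and is_range16(-32768)
 * are true, while is_range16(32768) and is_range16(-32769) are false,
 * i.e. exactly the values a sign-extended 16-bit immediate can encode.
 */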
static inline void emit_addu(unsigned int dst, unsigned int src1,
			     unsigned int src2, struct jit_ctx *ctx)
{
	emit_instr(ctx, addu, dst, src1, src2);
}

static inline void emit_nop(struct jit_ctx *ctx)
{
	emit_instr(ctx, nop);
}
/* Load a u32 immediate to a register */
static inline void emit_load_imm(unsigned int dst, u32 imm, struct jit_ctx *ctx)
{
	if (ctx->target != NULL) {
		/* addiu can only handle s16 */
		if (!is_range16(imm)) {
			u32 *p = &ctx->target[ctx->idx];
			uasm_i_lui(&p, r_tmp_imm, (s32)imm >> 16);
			p = &ctx->target[ctx->idx + 1];
			uasm_i_ori(&p, dst, r_tmp_imm, imm & 0xffff);
		} else {
			u32 *p = &ctx->target[ctx->idx];
			uasm_i_addiu(&p, dst, r_zero, imm);
		}
	}
	ctx->idx++;

	if (!is_range16(imm))
		ctx->idx++;
}
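/*
 * Worked example (editor's note): emit_load_imm(dst, 0x00aa5500) does
 * not fit in s16, so two instructions are emitted:
 *
 *	lui	r_tmp_imm, 0x00aa	# dst[31:16]
 *	ori	dst, r_tmp_imm, 0x5500	# dst[15:0]
 *
 * while emit_load_imm(dst, 0x1234) folds to "addiu dst, $zero, 0x1234".
 */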
static inline void emit_or(unsigned int dst, unsigned int src1,
			   unsigned int src2, struct jit_ctx *ctx)
{
	emit_instr(ctx, or, dst, src1, src2);
}

static inline void emit_ori(unsigned int dst, unsigned int src, u32 imm,
			    struct jit_ctx *ctx)
{
	if (imm >= BIT(16)) {
		emit_load_imm(r_tmp, imm, ctx);
		emit_or(dst, src, r_tmp, ctx);
	} else {
		emit_instr(ctx, ori, dst, src, imm);
	}
}
static inline void emit_daddiu(unsigned int dst, unsigned int src,
			       int imm, struct jit_ctx *ctx)
{
	/*
	 * Only used for stack, so the imm is relatively small
	 * and it fits in 15 bits
	 */
	emit_instr(ctx, daddiu, dst, src, imm);
}

static inline void emit_addiu(unsigned int dst, unsigned int src,
			      u32 imm, struct jit_ctx *ctx)
{
	if (!is_range16(imm)) {
		emit_load_imm(r_tmp, imm, ctx);
		emit_addu(dst, r_tmp, src, ctx);
	} else {
		emit_instr(ctx, addiu, dst, src, imm);
	}
}
static inline void emit_and(unsigned int dst, unsigned int src1,
			    unsigned int src2, struct jit_ctx *ctx)
{
	emit_instr(ctx, and, dst, src1, src2);
}

static inline void emit_andi(unsigned int dst, unsigned int src,
			     u32 imm, struct jit_ctx *ctx)
{
	/* If imm does not fit in u16 then load it to a register */
	if (imm >= BIT(16)) {
		emit_load_imm(r_tmp, imm, ctx);
		emit_and(dst, src, r_tmp, ctx);
	} else {
		emit_instr(ctx, andi, dst, src, imm);
	}
}

static inline void emit_xor(unsigned int dst, unsigned int src1,
			    unsigned int src2, struct jit_ctx *ctx)
{
	emit_instr(ctx, xor, dst, src1, src2);
}

static inline void emit_xori(ptr dst, ptr src, u32 imm, struct jit_ctx *ctx)
{
	/* If imm does not fit in u16 then load it to a register */
	if (imm >= BIT(16)) {
		emit_load_imm(r_tmp, imm, ctx);
		emit_xor(dst, src, r_tmp, ctx);
	} else {
		emit_instr(ctx, xori, dst, src, imm);
	}
}
static inline void emit_stack_offset(int offset, struct jit_ctx *ctx)
{
	emit_long_instr(ctx, ADDIU, r_sp, r_sp, offset);
}

static inline void emit_subu(unsigned int dst, unsigned int src1,
			     unsigned int src2, struct jit_ctx *ctx)
{
	emit_instr(ctx, subu, dst, src1, src2);
}

static inline void emit_neg(unsigned int reg, struct jit_ctx *ctx)
{
	emit_subu(reg, r_zero, reg, ctx);
}
static inline void emit_sllv(unsigned int dst, unsigned int src,
			     unsigned int sa, struct jit_ctx *ctx)
{
	emit_instr(ctx, sllv, dst, src, sa);
}

static inline void emit_sll(unsigned int dst, unsigned int src,
			    unsigned int sa, struct jit_ctx *ctx)
{
	/* sa is 5 bits long */
	if (sa >= BIT(5))
		/* Shifting >= 32 results in zero */
		emit_jit_reg_move(dst, r_zero, ctx);
	else
		emit_instr(ctx, sll, dst, src, sa);
}

static inline void emit_srlv(unsigned int dst, unsigned int src,
			     unsigned int sa, struct jit_ctx *ctx)
{
	emit_instr(ctx, srlv, dst, src, sa);
}

static inline void emit_srl(unsigned int dst, unsigned int src,
			    unsigned int sa, struct jit_ctx *ctx)
{
	/* sa is 5 bits long */
	if (sa >= BIT(5))
		/* Shifting >= 32 results in zero */
		emit_jit_reg_move(dst, r_zero, ctx);
	else
		emit_instr(ctx, srl, dst, src, sa);
}
static inline void emit_slt(unsigned int dst, unsigned int src1,
			    unsigned int src2, struct jit_ctx *ctx)
{
	emit_instr(ctx, slt, dst, src1, src2);
}

static inline void emit_sltu(unsigned int dst, unsigned int src1,
			     unsigned int src2, struct jit_ctx *ctx)
{
	emit_instr(ctx, sltu, dst, src1, src2);
}

static inline void emit_sltiu(unsigned int dst, unsigned int src,
			      unsigned int imm, struct jit_ctx *ctx)
{
	/* 16 bit immediate */
	if (!is_range16((s32)imm)) {
		emit_load_imm(r_tmp, imm, ctx);
		emit_sltu(dst, src, r_tmp, ctx);
	} else {
		emit_instr(ctx, sltiu, dst, src, imm);
	}
}
/* Store register on the stack */
static inline void emit_store_stack_reg(ptr reg, ptr base,
					unsigned int offset,
					struct jit_ctx *ctx)
{
	emit_long_instr(ctx, SW, reg, offset, base);
}

static inline void emit_store(ptr reg, ptr base, unsigned int offset,
			      struct jit_ctx *ctx)
{
	emit_instr(ctx, sw, reg, offset, base);
}

static inline void emit_load_stack_reg(ptr reg, ptr base,
				       unsigned int offset,
				       struct jit_ctx *ctx)
{
	emit_long_instr(ctx, LW, reg, offset, base);
}

static inline void emit_load(unsigned int reg, unsigned int base,
			     unsigned int offset, struct jit_ctx *ctx)
{
	emit_instr(ctx, lw, reg, offset, base);
}

static inline void emit_load_byte(unsigned int reg, unsigned int base,
				  unsigned int offset, struct jit_ctx *ctx)
{
	emit_instr(ctx, lb, reg, offset, base);
}

static inline void emit_half_load(unsigned int reg, unsigned int base,
				  unsigned int offset, struct jit_ctx *ctx)
{
	emit_instr(ctx, lh, reg, offset, base);
}

static inline void emit_half_load_unsigned(unsigned int reg, unsigned int base,
					   unsigned int offset,
					   struct jit_ctx *ctx)
{
	emit_instr(ctx, lhu, reg, offset, base);
}

static inline void emit_mul(unsigned int dst, unsigned int src1,
			    unsigned int src2, struct jit_ctx *ctx)
{
	emit_instr(ctx, mul, dst, src1, src2);
}
static inline void emit_div(unsigned int dst, unsigned int src,
			    struct jit_ctx *ctx)
{
	if (ctx->target != NULL) {
		u32 *p = &ctx->target[ctx->idx];
		uasm_i_divu(&p, dst, src);
		p = &ctx->target[ctx->idx + 1];
		uasm_i_mflo(&p, dst);
	}
	ctx->idx += 2; /* 2 insts */
}

static inline void emit_mod(unsigned int dst, unsigned int src,
			    struct jit_ctx *ctx)
{
	if (ctx->target != NULL) {
		u32 *p = &ctx->target[ctx->idx];
		uasm_i_divu(&p, dst, src);
		p = &ctx->target[ctx->idx + 1];
		uasm_i_mfhi(&p, dst);
	}
	ctx->idx += 2; /* 2 insts */
}
static inline void emit_dsll(unsigned int dst, unsigned int src,
			     unsigned int sa, struct jit_ctx *ctx)
{
	emit_instr(ctx, dsll, dst, src, sa);
}

static inline void emit_dsrl32(unsigned int dst, unsigned int src,
			       unsigned int sa, struct jit_ctx *ctx)
{
	emit_instr(ctx, dsrl32, dst, src, sa);
}

static inline void emit_wsbh(unsigned int dst, unsigned int src,
			     struct jit_ctx *ctx)
{
	emit_instr(ctx, wsbh, dst, src);
}
/* load pointer to register */
static inline void emit_load_ptr(unsigned int dst, unsigned int src,
				 int imm, struct jit_ctx *ctx)
{
	/* src contains the base addr of the 32/64-bit pointer */
	emit_long_instr(ctx, LW, dst, imm, src);
}
/* load a function pointer to register */
static inline void emit_load_func(unsigned int reg, ptr imm,
				  struct jit_ctx *ctx)
{
	if (IS_ENABLED(CONFIG_64BIT)) {
		/* At this point imm is always 64-bit */
		emit_load_imm(r_tmp, (u64)imm >> 32, ctx);
		emit_dsll(r_tmp_imm, r_tmp, 16, ctx); /* left shift by 16 */
		emit_ori(r_tmp, r_tmp_imm, (imm >> 16) & 0xffff, ctx);
		emit_dsll(r_tmp_imm, r_tmp, 16, ctx); /* left shift by 16 */
		emit_ori(reg, r_tmp_imm, imm & 0xffff, ctx);
	} else {
		emit_load_imm(reg, imm, ctx);
	}
}
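/*
 * Editor's note: on 64-bit the sequence above assembles the address in
 * 16-bit pieces. For imm = 0x0000002badc0ffee it becomes, roughly:
 *
 *	addiu	r_tmp, $zero, 0x2b		# bits 63:32 (fits s16)
 *	dsll	r_tmp_imm, r_tmp, 16
 *	ori	r_tmp, r_tmp_imm, 0xadc0	# bits 31:16
 *	dsll	r_tmp_imm, r_tmp, 16
 *	ori	reg, r_tmp_imm, 0xffee		# bits 15:0
 */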
/* Move to real MIPS register */
static inline void emit_reg_move(ptr dst, ptr src, struct jit_ctx *ctx)
{
	emit_long_instr(ctx, ADDU, dst, src, r_zero);
}

/* Move to JIT (32-bit) register */
static inline void emit_jit_reg_move(ptr dst, ptr src, struct jit_ctx *ctx)
{
	emit_addu(dst, src, r_zero, ctx);
}
/* Compute the immediate value for PC-relative branches. */
static inline u32 b_imm(unsigned int tgt, struct jit_ctx *ctx)
{
	if (ctx->target == NULL)
		return 0;

	/*
	 * We want a pc-relative branch. We only do forward branches
	 * so tgt is always after pc. tgt is the instruction offset
	 * we want to jump to.
	 *
	 * Branch on MIPS:
	 * I:   target_offset <- sign_extend(offset)
	 * I+1: PC += target_offset (delay slot)
	 *
	 * ctx->idx currently points to the branch instruction
	 * but the offset is added to the delay slot so we need
	 * to subtract 4.
	 */
	return ctx->offsets[tgt] -
		(ctx->idx * 4 - ctx->prologue_bytes) - 4;
}
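/*
 * Worked example (editor's note): during the second pass ctx->idx
 * counts from the start of the prologue, while ctx->offsets[] was
 * recorded body-relative in the first pass, hence the prologue_bytes
 * correction. If the branch sits at body offset 40 and the target
 * instruction starts at body offset 64, b_imm() returns
 * 64 - 40 - 4 = 20: the branch offset is applied from the delay slot,
 * not from the branch itself.
 */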
static inline void emit_bcond(int cond, unsigned int reg1, unsigned int reg2,
			      unsigned int imm, struct jit_ctx *ctx)
{
	if (ctx->target != NULL) {
		u32 *p = &ctx->target[ctx->idx];

		switch (cond) {
		case MIPS_COND_EQ:
			uasm_i_beq(&p, reg1, reg2, imm);
			break;
		case MIPS_COND_NE:
			uasm_i_bne(&p, reg1, reg2, imm);
			break;
		case MIPS_COND_ALL:
			uasm_i_b(&p, imm);
			break;
		default:
			pr_warn("%s: Unhandled branch conditional: %d\n",
				__func__, cond);
		}
	}
	ctx->idx++;
}

static inline void emit_b(unsigned int imm, struct jit_ctx *ctx)
{
	emit_bcond(MIPS_COND_ALL, r_zero, r_zero, imm, ctx);
}

static inline void emit_jalr(unsigned int link, unsigned int reg,
			     struct jit_ctx *ctx)
{
	emit_instr(ctx, jalr, link, reg);
}

static inline void emit_jr(unsigned int reg, struct jit_ctx *ctx)
{
	emit_instr(ctx, jr, reg);
}
static inline u16 align_sp(unsigned int num)
{
	/* Double word alignment for 32-bit, quadword for 64-bit */
	unsigned int align = IS_ENABLED(CONFIG_64BIT) ? 16 : 8;

	num = (num + (align - 1)) & -align;
	return num;
}
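/*
 * Examples (editor's note): align_sp(20) is 24 on 32-bit (8-byte
 * alignment) and 32 on 64-bit (16-byte alignment); already-aligned
 * values pass through unchanged.
 */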
static void save_bpf_jit_regs(struct jit_ctx *ctx, unsigned offset)
{
	int i = 0, real_off = 0;
	u32 sflags, tmp_flags;

	/* Adjust the stack pointer */
	emit_stack_offset(-align_sp(offset), ctx);

	tmp_flags = sflags = ctx->flags >> SEEN_SREG_SFT;
	/* sflags is essentially a bitmap */
	while (tmp_flags) {
		if ((sflags >> i) & 0x1) {
			emit_store_stack_reg(MIPS_R_S0 + i, r_sp, real_off,
					     ctx);
			real_off += SZREG;
		}
		i++;
		tmp_flags >>= 1;
	}

	/* save return address */
	if (ctx->flags & SEEN_CALL) {
		emit_store_stack_reg(r_ra, r_sp, real_off, ctx);
		real_off += SZREG;
	}

	/* Setup r_M leaving the alignment gap if necessary */
	if (ctx->flags & SEEN_MEM) {
		if (real_off % (SZREG * 2))
			real_off += SZREG;
		emit_long_instr(ctx, ADDIU, r_M, r_sp, real_off);
	}
}
static void restore_bpf_jit_regs(struct jit_ctx *ctx,
				 unsigned int offset)
{
	int i = 0, real_off = 0;
	u32 sflags, tmp_flags;

	tmp_flags = sflags = ctx->flags >> SEEN_SREG_SFT;
	/* sflags is a bitmap */
	while (tmp_flags) {
		if ((sflags >> i) & 0x1) {
			emit_load_stack_reg(MIPS_R_S0 + i, r_sp, real_off,
					    ctx);
			real_off += SZREG;
		}
		i++;
		tmp_flags >>= 1;
	}

	/* restore return address */
	if (ctx->flags & SEEN_CALL)
		emit_load_stack_reg(r_ra, r_sp, real_off, ctx);

	/* Restore the sp and discard the scratch memory */
	emit_stack_offset(align_sp(offset), ctx);
}
static unsigned int get_stack_depth(struct jit_ctx *ctx)
{
	int sp_off = 0;

	/* How many s* regs do we need to preserve? */
	sp_off += hweight32(ctx->flags >> SEEN_SREG_SFT) * SZREG;

	if (ctx->flags & SEEN_MEM)
		sp_off += 4 * BPF_MEMWORDS; /* BPF_MEMWORDS are 32-bit */

	if (ctx->flags & SEEN_CALL)
		sp_off += SZREG; /* Space for our ra register */

	return sp_off;
}
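/*
 * Worked example (editor's note): a MIPS32 filter that uses A and X
 * and calls a load helper needs two s-registers (2 * SZREG = 8) plus
 * SZREG = 4 for $ra, i.e. sp_off = 12, which save_bpf_jit_regs()
 * rounds up to 16 via align_sp(). Scratch memory adds a further
 * 4 * BPF_MEMWORDS = 64 bytes when SEEN_MEM is set.
 */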
static void build_prologue(struct jit_ctx *ctx)
{
	int sp_off;

	/* Calculate the total offset for the stack pointer */
	sp_off = get_stack_depth(ctx);
	save_bpf_jit_regs(ctx, sp_off);

	if (ctx->flags & SEEN_SKB)
		emit_reg_move(r_skb, MIPS_R_A0, ctx);

	if (ctx->flags & SEEN_SKB_DATA) {
		/* Load packet length */
		emit_load(r_skb_len, r_skb, offsetof(struct sk_buff, len),
			  ctx);
		emit_load(r_tmp, r_skb, offsetof(struct sk_buff, data_len),
			  ctx);
		/* Load the data pointer */
		emit_load_ptr(r_skb_data, r_skb,
			      offsetof(struct sk_buff, data), ctx);
		/* Load the header length */
		emit_subu(r_skb_hl, r_skb_len, r_tmp, ctx);
	}

	if (ctx->flags & SEEN_X)
		emit_jit_reg_move(r_X, r_zero, ctx);

	/* Do not leak kernel data to userspace */
	if (bpf_needs_clear_a(&ctx->skf->insns[0]))
		emit_jit_reg_move(r_A, r_zero, ctx);
}
static void build_epilogue(struct jit_ctx *ctx)
{
	unsigned int sp_off;

	/* Calculate the total offset for the stack pointer */
	sp_off = get_stack_depth(ctx);
	restore_bpf_jit_regs(ctx, sp_off);

	/* Return */
	emit_jr(r_ra, ctx);
	emit_nop(ctx);
}
#define CHOOSE_LOAD_FUNC(K, func) \
	((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative : func) : \
	 func##_positive)
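/*
 * Editor's note: k >= 0 selects func##_positive, SKF_LL_OFF <= k < 0
 * selects func##_negative, and anything below SKF_LL_OFF falls back to
 * the generic func slow path. For example, CHOOSE_LOAD_FUNC(64,
 * sk_load_word) resolves to sk_load_word_positive.
 */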
static int build_body(struct jit_ctx *ctx)
{
	const struct bpf_prog *prog = ctx->skf;
	const struct sock_filter *inst;
	unsigned int i, off, condt;
	u32 k, b_off __maybe_unused;
	u8 (*sk_load_func)(unsigned long *skb, int offset);

	for (i = 0; i < prog->len; i++) {
		u16 code;

		inst = &(prog->insns[i]);
		pr_debug("%s: code->0x%02x, jt->0x%x, jf->0x%x, k->0x%x\n",
			 __func__, inst->code, inst->jt, inst->jf, inst->k);
		k = inst->k;
		code = bpf_anc_helper(inst);

		if (ctx->target == NULL)
			ctx->offsets[i] = ctx->idx * 4;
		switch (code) {
		case BPF_LD | BPF_IMM:
			/* A <- k ==> li r_A, k */
			ctx->flags |= SEEN_A;
			emit_load_imm(r_A, k, ctx);
			break;
		case BPF_LD | BPF_W | BPF_LEN:
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, len) != 4);
			/* A <- len ==> lw r_A, offset(skb) */
			ctx->flags |= SEEN_SKB | SEEN_A;
			off = offsetof(struct sk_buff, len);
			emit_load(r_A, r_skb, off, ctx);
			break;
		case BPF_LD | BPF_MEM:
			/* A <- M[k] ==> lw r_A, offset(M) */
			ctx->flags |= SEEN_MEM | SEEN_A;
			emit_load(r_A, r_M, SCRATCH_OFF(k), ctx);
			break;
		case BPF_LD | BPF_W | BPF_ABS:
			/* A <- P[k:4] */
			sk_load_func = CHOOSE_LOAD_FUNC(k, sk_load_word);
			goto load;
		case BPF_LD | BPF_H | BPF_ABS:
			/* A <- P[k:2] */
			sk_load_func = CHOOSE_LOAD_FUNC(k, sk_load_half);
			goto load;
		case BPF_LD | BPF_B | BPF_ABS:
			/* A <- P[k:1] */
			sk_load_func = CHOOSE_LOAD_FUNC(k, sk_load_byte);
load:
			emit_load_imm(r_off, k, ctx);
load_common:
			ctx->flags |= SEEN_CALL | SEEN_OFF |
				SEEN_SKB | SEEN_A | SEEN_SKB_DATA;

			emit_load_func(r_s0, (ptr)sk_load_func, ctx);
			emit_reg_move(MIPS_R_A0, r_skb, ctx);
			emit_jalr(MIPS_R_RA, r_s0, ctx);
			/* Load second argument to delay slot */
			emit_reg_move(MIPS_R_A1, r_off, ctx);
			/* Check the error value */
			emit_bcond(MIPS_COND_EQ, r_ret, 0, b_imm(i + 1, ctx),
				   ctx);
			/* Load return register on DS for failures */
			emit_reg_move(r_ret, r_zero, ctx);
			/* Return with error */
			emit_b(b_imm(prog->len, ctx), ctx);
			emit_nop(ctx);
			break;
		case BPF_LD | BPF_W | BPF_IND:
			/* A <- P[X + k:4] */
			sk_load_func = sk_load_word;
			goto load_ind;
		case BPF_LD | BPF_H | BPF_IND:
			/* A <- P[X + k:2] */
			sk_load_func = sk_load_half;
			goto load_ind;
		case BPF_LD | BPF_B | BPF_IND:
			/* A <- P[X + k:1] */
			sk_load_func = sk_load_byte;
load_ind:
			ctx->flags |= SEEN_OFF | SEEN_X;
			emit_addiu(r_off, r_X, k, ctx);
			goto load_common;
		case BPF_LDX | BPF_IMM:
			/* X <- k */
			ctx->flags |= SEEN_X;
			emit_load_imm(r_X, k, ctx);
			break;
		case BPF_LDX | BPF_MEM:
			/* X <- M[k] */
			ctx->flags |= SEEN_X | SEEN_MEM;
			emit_load(r_X, r_M, SCRATCH_OFF(k), ctx);
			break;
		case BPF_LDX | BPF_W | BPF_LEN:
			/* X <- len */
			ctx->flags |= SEEN_X | SEEN_SKB;
			off = offsetof(struct sk_buff, len);
			emit_load(r_X, r_skb, off, ctx);
			break;
		case BPF_LDX | BPF_B | BPF_MSH:
			/* X <- 4 * (P[k:1] & 0xf) */
			ctx->flags |= SEEN_X | SEEN_CALL | SEEN_SKB;
			/* Load offset to a1 */
			emit_load_func(r_s0, (ptr)sk_load_byte, ctx);
			/*
			 * This may emit two instructions so it may not fit
			 * in the delay slot. So use a0 in the delay slot.
			 */
			emit_load_imm(MIPS_R_A1, k, ctx);
			emit_jalr(MIPS_R_RA, r_s0, ctx);
			emit_reg_move(MIPS_R_A0, r_skb, ctx); /* delay slot */
			/* Check the error value */
			emit_bcond(MIPS_COND_NE, r_ret, 0,
				   b_imm(prog->len, ctx), ctx);
			emit_reg_move(r_ret, r_zero, ctx);

			/* X <- P[k:1] & 0xf */
			emit_andi(r_X, r_A, 0xf, ctx);
			/* X << 2 */
			emit_b(b_imm(i + 1, ctx), ctx);
			emit_sll(r_X, r_X, 2, ctx); /* delay slot */
			break;
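		/*
		 * Worked example (editor's note): BPF_LDX|BPF_B|BPF_MSH is
		 * the classic "IP header length" idiom. For an IPv4 packet
		 * with version/IHL byte 0x45 at offset k, P[k:1] & 0xf = 5
		 * and the sll by 2 yields X = 5 * 4 = 20, the header length
		 * in bytes.
		 */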
		case BPF_ST:
			/* M[k] <- A */
			ctx->flags |= SEEN_MEM | SEEN_A;
			emit_store(r_A, r_M, SCRATCH_OFF(k), ctx);
			break;
		case BPF_STX:
			/* M[k] <- X */
			ctx->flags |= SEEN_MEM | SEEN_X;
			emit_store(r_X, r_M, SCRATCH_OFF(k), ctx);
			break;
		case BPF_ALU | BPF_ADD | BPF_K:
			/* A += K */
			ctx->flags |= SEEN_A;
			emit_addiu(r_A, r_A, k, ctx);
			break;
		case BPF_ALU | BPF_ADD | BPF_X:
			/* A += X */
			ctx->flags |= SEEN_A | SEEN_X;
			emit_addu(r_A, r_A, r_X, ctx);
			break;
		case BPF_ALU | BPF_SUB | BPF_K:
			/* A -= K */
			ctx->flags |= SEEN_A;
			emit_addiu(r_A, r_A, -k, ctx);
			break;
		case BPF_ALU | BPF_SUB | BPF_X:
			/* A -= X */
			ctx->flags |= SEEN_A | SEEN_X;
			emit_subu(r_A, r_A, r_X, ctx);
			break;
		case BPF_ALU | BPF_MUL | BPF_K:
			/* A *= K */
			/* Load K to scratch register before MUL */
			ctx->flags |= SEEN_A;
			emit_load_imm(r_s0, k, ctx);
			emit_mul(r_A, r_A, r_s0, ctx);
			break;
		case BPF_ALU | BPF_MUL | BPF_X:
			/* A *= X */
			ctx->flags |= SEEN_A | SEEN_X;
			emit_mul(r_A, r_A, r_X, ctx);
			break;
		case BPF_ALU | BPF_DIV | BPF_K:
			/* A /= k */
			if (k == 1)
				break;
			if (optimize_div(&k)) {
				ctx->flags |= SEEN_A;
				emit_srl(r_A, r_A, k, ctx);
				break;
			}
			ctx->flags |= SEEN_A;
			emit_load_imm(r_s0, k, ctx);
			emit_div(r_A, r_s0, ctx);
			break;
		case BPF_ALU | BPF_MOD | BPF_K:
			/* A %= k */
			if (k == 1) {
				ctx->flags |= SEEN_A;
				emit_jit_reg_move(r_A, r_zero, ctx);
			} else {
				ctx->flags |= SEEN_A;
				emit_load_imm(r_s0, k, ctx);
				emit_mod(r_A, r_s0, ctx);
			}
			break;
		case BPF_ALU | BPF_DIV | BPF_X:
			/* A /= X */
			ctx->flags |= SEEN_X | SEEN_A;
			/* Check if r_X is zero */
			emit_bcond(MIPS_COND_EQ, r_X, r_zero,
				   b_imm(prog->len, ctx), ctx);
			emit_load_imm(r_ret, 0, ctx); /* delay slot */
			emit_div(r_A, r_X, ctx);
			break;
		case BPF_ALU | BPF_MOD | BPF_X:
			/* A %= X */
			ctx->flags |= SEEN_X | SEEN_A;
			/* Check if r_X is zero */
			emit_bcond(MIPS_COND_EQ, r_X, r_zero,
				   b_imm(prog->len, ctx), ctx);
			emit_load_imm(r_ret, 0, ctx); /* delay slot */
			emit_mod(r_A, r_X, ctx);
			break;
		case BPF_ALU | BPF_OR | BPF_K:
			/* A |= K */
			ctx->flags |= SEEN_A;
			emit_ori(r_A, r_A, k, ctx);
			break;
		case BPF_ALU | BPF_OR | BPF_X:
			/* A |= X; use the register-register form, not ori */
			ctx->flags |= SEEN_A;
			emit_or(r_A, r_A, r_X, ctx);
			break;
		case BPF_ALU | BPF_XOR | BPF_K:
			/* A ^= k */
			ctx->flags |= SEEN_A;
			emit_xori(r_A, r_A, k, ctx);
			break;
		case BPF_ANC | SKF_AD_ALU_XOR_X:
		case BPF_ALU | BPF_XOR | BPF_X:
			/* A ^= X */
			ctx->flags |= SEEN_A;
			emit_xor(r_A, r_A, r_X, ctx);
			break;
		case BPF_ALU | BPF_AND | BPF_K:
			/* A &= K */
			ctx->flags |= SEEN_A;
			emit_andi(r_A, r_A, k, ctx);
			break;
		case BPF_ALU | BPF_AND | BPF_X:
			/* A &= X */
			ctx->flags |= SEEN_A | SEEN_X;
			emit_and(r_A, r_A, r_X, ctx);
			break;
		case BPF_ALU | BPF_LSH | BPF_K:
			/* A <<= K */
			ctx->flags |= SEEN_A;
			emit_sll(r_A, r_A, k, ctx);
			break;
		case BPF_ALU | BPF_LSH | BPF_X:
			/* A <<= X */
			ctx->flags |= SEEN_A | SEEN_X;
			emit_sllv(r_A, r_A, r_X, ctx);
			break;
		case BPF_ALU | BPF_RSH | BPF_K:
			/* A >>= K */
			ctx->flags |= SEEN_A;
			emit_srl(r_A, r_A, k, ctx);
			break;
		case BPF_ALU | BPF_RSH | BPF_X:
			ctx->flags |= SEEN_A | SEEN_X;
			emit_srlv(r_A, r_A, r_X, ctx);
			break;
		case BPF_ALU | BPF_NEG:
			/* A = -A */
			ctx->flags |= SEEN_A;
			emit_neg(r_A, ctx);
			break;
		case BPF_JMP | BPF_JA:
			/* pc += K */
			emit_b(b_imm(i + k + 1, ctx), ctx);
			emit_nop(ctx);
			break;
		case BPF_JMP | BPF_JEQ | BPF_K:
			/* pc += ( A == K ) ? pc->jt : pc->jf */
			condt = MIPS_COND_EQ | MIPS_COND_K;
			goto jmp_cmp;
		case BPF_JMP | BPF_JEQ | BPF_X:
			ctx->flags |= SEEN_X;
			/* pc += ( A == X ) ? pc->jt : pc->jf */
			condt = MIPS_COND_EQ | MIPS_COND_X;
			goto jmp_cmp;
		case BPF_JMP | BPF_JGE | BPF_K:
			/* pc += ( A >= K ) ? pc->jt : pc->jf */
			condt = MIPS_COND_GE | MIPS_COND_K;
			goto jmp_cmp;
		case BPF_JMP | BPF_JGE | BPF_X:
			ctx->flags |= SEEN_X;
			/* pc += ( A >= X ) ? pc->jt : pc->jf */
			condt = MIPS_COND_GE | MIPS_COND_X;
			goto jmp_cmp;
		case BPF_JMP | BPF_JGT | BPF_K:
			/* pc += ( A > K ) ? pc->jt : pc->jf */
			condt = MIPS_COND_GT | MIPS_COND_K;
			goto jmp_cmp;
		case BPF_JMP | BPF_JGT | BPF_X:
			ctx->flags |= SEEN_X;
			/* pc += ( A > X ) ? pc->jt : pc->jf */
			condt = MIPS_COND_GT | MIPS_COND_X;
jmp_cmp:
			/* Greater or Equal */
			if ((condt & MIPS_COND_GE) ||
			    (condt & MIPS_COND_GT)) {
				if (condt & MIPS_COND_K) { /* K */
					ctx->flags |= SEEN_A;
					emit_sltiu(r_s0, r_A, k, ctx);
				} else { /* X */
					ctx->flags |= SEEN_A |
						SEEN_X;
					emit_sltu(r_s0, r_A, r_X, ctx);
				}
				/* A < (K|X) ? r_scratch = 1 */
				b_off = b_imm(i + inst->jf + 1, ctx);
				emit_bcond(MIPS_COND_NE, r_s0, r_zero, b_off,
					   ctx);
				emit_nop(ctx);
				/* A > (K|X) ? scratch = 0 */
				if (condt & MIPS_COND_GT) {
					/* Checking for equality */
					ctx->flags |= SEEN_A | SEEN_X;
					if (condt & MIPS_COND_K)
						emit_load_imm(r_s0, k, ctx);
					else
						emit_jit_reg_move(r_s0, r_X,
								  ctx);
					b_off = b_imm(i + inst->jf + 1, ctx);
					emit_bcond(MIPS_COND_EQ, r_A, r_s0,
						   b_off, ctx);
					emit_nop(ctx);
					/* Finally, A > K|X */
					b_off = b_imm(i + inst->jt + 1, ctx);
					emit_b(b_off, ctx);
					emit_nop(ctx);
				} else {
					/* A >= (K|X) so jump */
					b_off = b_imm(i + inst->jt + 1, ctx);
					emit_b(b_off, ctx);
					emit_nop(ctx);
				}
			} else {
				/* A == (K|X) */
				if (condt & MIPS_COND_K) { /* K */
					ctx->flags |= SEEN_A;
					emit_load_imm(r_s0, k, ctx);
					/* jump true */
					b_off = b_imm(i + inst->jt + 1, ctx);
					emit_bcond(MIPS_COND_EQ, r_A, r_s0,
						   b_off, ctx);
					emit_nop(ctx);
					/* jump false */
					b_off = b_imm(i + inst->jf + 1,
						      ctx);
					emit_bcond(MIPS_COND_NE, r_A, r_s0,
						   b_off, ctx);
					emit_nop(ctx);
				} else { /* X */
					/* jump true */
					ctx->flags |= SEEN_A | SEEN_X;
					b_off = b_imm(i + inst->jt + 1,
						      ctx);
					emit_bcond(MIPS_COND_EQ, r_A, r_X,
						   b_off, ctx);
					emit_nop(ctx);
					/* jump false */
					b_off = b_imm(i + inst->jf + 1, ctx);
					emit_bcond(MIPS_COND_NE, r_A, r_X,
						   b_off, ctx);
					emit_nop(ctx);
				}
			}
			break;
		case BPF_JMP | BPF_JSET | BPF_K:
			ctx->flags |= SEEN_A;
			/* pc += (A & K) ? pc -> jt : pc -> jf */
			emit_load_imm(r_s1, k, ctx);
			emit_and(r_s0, r_A, r_s1, ctx);
			/* jump true */
			b_off = b_imm(i + inst->jt + 1, ctx);
			emit_bcond(MIPS_COND_NE, r_s0, r_zero, b_off, ctx);
			emit_nop(ctx);
			/* jump false */
			b_off = b_imm(i + inst->jf + 1, ctx);
			emit_b(b_off, ctx);
			emit_nop(ctx);
			break;
		case BPF_JMP | BPF_JSET | BPF_X:
			ctx->flags |= SEEN_X | SEEN_A;
			/* pc += (A & X) ? pc -> jt : pc -> jf */
			emit_and(r_s0, r_A, r_X, ctx);
			/* jump true */
			b_off = b_imm(i + inst->jt + 1, ctx);
			emit_bcond(MIPS_COND_NE, r_s0, r_zero, b_off, ctx);
			emit_nop(ctx);
			/* jump false */
			b_off = b_imm(i + inst->jf + 1, ctx);
			emit_b(b_off, ctx);
			emit_nop(ctx);
			break;
		case BPF_RET | BPF_A:
			ctx->flags |= SEEN_A;
			if (i != prog->len - 1)
				/*
				 * If this is not the last instruction
				 * then jump to the epilogue
				 */
				emit_b(b_imm(prog->len, ctx), ctx);
			emit_reg_move(r_ret, r_A, ctx); /* delay slot */
			break;
		case BPF_RET | BPF_K:
			/*
			 * It can emit two instructions so it does not fit
			 * in the delay slot.
			 */
			emit_load_imm(r_ret, k, ctx);
			if (i != prog->len - 1) {
				/*
				 * If this is not the last instruction
				 * then jump to the epilogue
				 */
				emit_b(b_imm(prog->len, ctx), ctx);
				emit_nop(ctx);
			}
			break;
		case BPF_MISC | BPF_TAX:
			/* X = A */
			ctx->flags |= SEEN_X | SEEN_A;
			emit_jit_reg_move(r_X, r_A, ctx);
			break;
		case BPF_MISC | BPF_TXA:
			/* A = X */
			ctx->flags |= SEEN_A | SEEN_X;
			emit_jit_reg_move(r_A, r_X, ctx);
			break;
		/* AUX */
		case BPF_ANC | SKF_AD_PROTOCOL:
			/* A = ntohs(skb->protocol) */
			ctx->flags |= SEEN_SKB | SEEN_OFF | SEEN_A;
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
						  protocol) != 2);
			off = offsetof(struct sk_buff, protocol);
			emit_half_load(r_A, r_skb, off, ctx);
#ifdef CONFIG_CPU_LITTLE_ENDIAN
			/* This needs little endian fixup */
			if (cpu_has_wsbh) {
				/* R2 and later have the wsbh instruction */
				emit_wsbh(r_A, r_A, ctx);
			} else {
				/* Get first byte */
				emit_andi(r_tmp_imm, r_A, 0xff, ctx);
				/* Left shift it */
				emit_sll(r_tmp, r_tmp_imm, 8, ctx);
				/* Get second byte */
				emit_srl(r_tmp_imm, r_A, 8, ctx);
				emit_andi(r_tmp_imm, r_tmp_imm, 0xff, ctx);
				/* Put everything together in r_A */
				emit_or(r_A, r_tmp, r_tmp_imm, ctx);
			}
#endif
			break;
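		/*
		 * Worked example (editor's note): skb->protocol holds the
		 * value in network byte order, e.g. htons(ETH_P_IP) is
		 * stored as bytes 08 00. A little-endian lh reads that as
		 * 0x0008, and the wsbh (or the shift-and-or fallback above)
		 * swaps it back to 0x0800 == ETH_P_IP.
		 */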
		case BPF_ANC | SKF_AD_CPU:
			ctx->flags |= SEEN_A | SEEN_OFF;
			/* A = current_thread_info()->cpu */
			BUILD_BUG_ON(FIELD_SIZEOF(struct thread_info,
						  cpu) != 4);
			off = offsetof(struct thread_info, cpu);
			/* $28/gp points to the thread_info struct */
			emit_load(r_A, 28, off, ctx);
			break;
		case BPF_ANC | SKF_AD_IFINDEX:
			/* A = skb->dev->ifindex */
		case BPF_ANC | SKF_AD_HATYPE:
			/* A = skb->dev->type */
			ctx->flags |= SEEN_SKB | SEEN_A;
			off = offsetof(struct sk_buff, dev);
			/* Load *dev pointer */
			emit_load_ptr(r_s0, r_skb, off, ctx);
			/* error (0) in the delay slot */
			emit_bcond(MIPS_COND_EQ, r_s0, r_zero,
				   b_imm(prog->len, ctx), ctx);
			emit_reg_move(r_ret, r_zero, ctx);
			if (code == (BPF_ANC | SKF_AD_IFINDEX)) {
				BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, ifindex) != 4);
				off = offsetof(struct net_device, ifindex);
				emit_load(r_A, r_s0, off, ctx);
			} else { /* code == (BPF_ANC | SKF_AD_HATYPE) */
				BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, type) != 2);
				off = offsetof(struct net_device, type);
				emit_half_load_unsigned(r_A, r_s0, off, ctx);
			}
			break;
		case BPF_ANC | SKF_AD_MARK:
			ctx->flags |= SEEN_SKB | SEEN_A;
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4);
			off = offsetof(struct sk_buff, mark);
			emit_load(r_A, r_skb, off, ctx);
			break;
		case BPF_ANC | SKF_AD_RXHASH:
			ctx->flags |= SEEN_SKB | SEEN_A;
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, hash) != 4);
			off = offsetof(struct sk_buff, hash);
			emit_load(r_A, r_skb, off, ctx);
			break;
		case BPF_ANC | SKF_AD_VLAN_TAG:
		case BPF_ANC | SKF_AD_VLAN_TAG_PRESENT:
			ctx->flags |= SEEN_SKB | SEEN_A;
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
						  vlan_tci) != 2);
			off = offsetof(struct sk_buff, vlan_tci);
			emit_half_load_unsigned(r_s0, r_skb, off, ctx);
			if (code == (BPF_ANC | SKF_AD_VLAN_TAG)) {
				emit_andi(r_A, r_s0, (u16)~VLAN_TAG_PRESENT, ctx);
			} else {
				emit_andi(r_A, r_s0, VLAN_TAG_PRESENT, ctx);
				/* return 1 if present */
				emit_sltu(r_A, r_zero, r_A, ctx);
			}
			break;
		case BPF_ANC | SKF_AD_PKTTYPE:
			ctx->flags |= SEEN_SKB;

			emit_load_byte(r_tmp, r_skb, PKT_TYPE_OFFSET(), ctx);
			/* Keep only the last 3 bits */
			emit_andi(r_A, r_tmp, PKT_TYPE_MAX, ctx);
#ifdef __BIG_ENDIAN_BITFIELD
			/* Get the actual packet type to the lower 3 bits */
			emit_srl(r_A, r_A, 5, ctx);
#endif
			break;
		case BPF_ANC | SKF_AD_QUEUE:
			ctx->flags |= SEEN_SKB | SEEN_A;
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
						  queue_mapping) != 2);
			BUILD_BUG_ON(offsetof(struct sk_buff,
					      queue_mapping) > 0xff);
			off = offsetof(struct sk_buff, queue_mapping);
			emit_half_load_unsigned(r_A, r_skb, off, ctx);
			break;
		default:
			pr_debug("%s: Unhandled opcode: 0x%02x\n", __FILE__,
				 inst->code);
			return -1;
		}
	}

	/* compute offsets only during the first pass */
	if (ctx->target == NULL)
		ctx->offsets[i] = ctx->idx * 4;

	return 0;
}
int bpf_jit_enable __read_mostly;

void bpf_jit_compile(struct bpf_prog *fp)
{
	struct jit_ctx ctx;
	unsigned int alloc_size, tmp_idx;

	if (!bpf_jit_enable)
		return;

	memset(&ctx, 0, sizeof(ctx));

	ctx.offsets = kcalloc(fp->len + 1, sizeof(*ctx.offsets), GFP_KERNEL);
	if (ctx.offsets == NULL)
		return;

	ctx.skf = fp;

	if (build_body(&ctx))
		goto out;

	tmp_idx = ctx.idx;
	build_prologue(&ctx);
	ctx.prologue_bytes = (ctx.idx - tmp_idx) * 4;
	/* just to complete the ctx.idx count */
	build_epilogue(&ctx);

	alloc_size = 4 * ctx.idx;
	ctx.target = module_alloc(alloc_size);
	if (ctx.target == NULL)
		goto out;

	/* Clean it */
	memset(ctx.target, 0, alloc_size);

	ctx.idx = 0;

	/* Generate the actual JIT code */
	build_prologue(&ctx);
	build_body(&ctx);
	build_epilogue(&ctx);

	/* Update the icache */
	flush_icache_range((ptr)ctx.target, (ptr)(ctx.target + ctx.idx));

	if (bpf_jit_enable > 1)
		/* Dump JIT code */
		bpf_jit_dump(fp->len, alloc_size, 2, ctx.target);

	fp->bpf_func = (void *)ctx.target;
	fp->jited = 1;

out:
	kfree(ctx.offsets);
}
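/*
 * Editor's note on the two-pass design above: the first build_body()
 * call runs with ctx.target == NULL, only counting instructions and
 * filling ctx.offsets[]; once the real buffer is allocated, the
 * prologue, body and epilogue are emitted for real, using the recorded
 * offsets to resolve the forward branches.
 */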
void bpf_jit_free(struct bpf_prog *fp)
{
	if (fp->jited)
		module_memfree(fp->bpf_func);

	bpf_prog_unlock_free(fp);
}