#include <linux/moduleloader.h>
#include <linux/workqueue.h>
#include <linux/netdevice.h>
#include <linux/filter.h>
#include <linux/cache.h>

#include <asm/cacheflush.h>
#include <asm/ptrace.h>

#include "bpf_jit.h"

int bpf_jit_enable __read_mostly;

static inline bool is_simm13(unsigned int value)
{
	return value + 0x1000 < 0x2000;
}
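
/* The unsigned compare above accepts exactly the signed 13-bit range
 * [-4096, 4095]: e.g. value = (unsigned int)-4096 wraps to 0 < 0x2000
 * (true), while value = 4096 gives 0x2000 < 0x2000 (false).
 */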

static void bpf_flush_icache(void *start_, void *end_)
{
#ifdef CONFIG_SPARC64
	/* Cheetah's I-cache is fully coherent. */
	if (tlb_type == spitfire) {
		unsigned long start = (unsigned long) start_;
		unsigned long end = (unsigned long) end_;

		start &= ~7UL;
		end = (end + 7UL) & ~7UL;
		while (start < end) {
			flushi(start);
			start += 32;
		}
	}
#endif
}

#define SEEN_DATAREF	1 /* might call external helpers */
#define SEEN_XREG	2 /* r_X is used */
#define SEEN_MEM	4 /* use mem[] for temporary storage */

#define S13(X)		((X) & 0x1fff)
#define IMMED		0x00002000
#define RD(X)		((X) << 25)
#define RS1(X)		((X) << 14)
#define RS2(X)		((X) << 0)
#define OP(X)		((X) << 30)
#define OP2(X)		((X) << 22)
#define OP3(X)		((X) << 19)
#define COND(X)		((X) << 25)

#define F2(X, Y)	(OP(X) | OP2(Y))
#define F3(X, Y)	(OP(X) | OP3(Y))

#define CONDN		COND(0x0)
#define CONDE		COND(0x1)
#define CONDLE		COND(0x2)
#define CONDL		COND(0x3)
#define CONDLEU		COND(0x4)
#define CONDCS		COND(0x5)
#define CONDNEG		COND(0x6)
#define CONDVC		COND(0x7)
#define CONDA		COND(0x8)
#define CONDNE		COND(0x9)
#define CONDG		COND(0xa)
#define CONDGE		COND(0xb)
#define CONDGU		COND(0xc)
#define CONDCC		COND(0xd)
#define CONDPOS		COND(0xe)
#define CONDVS		COND(0xf)

#define CONDGEU		CONDCC
#define CONDLU		CONDCS

#define WDISP22(X)	(((X) >> 2) & 0x3fffff)

#define BA		(F2(0, 2) | CONDA)
#define BGU		(F2(0, 2) | CONDGU)
#define BLEU		(F2(0, 2) | CONDLEU)
#define BGEU		(F2(0, 2) | CONDGEU)
#define BLU		(F2(0, 2) | CONDLU)
#define BE		(F2(0, 2) | CONDE)
#define BNE		(F2(0, 2) | CONDNE)

#ifdef CONFIG_SPARC64
#define BNE_PTR		(F2(0, 1) | CONDNE | (2 << 20))
#else
#define BNE_PTR		BNE
#endif

#define SETHI(K, REG)	\
	(F2(0, 0x4) | RD(REG) | (((K) >> 10) & 0x3fffff))
#define OR_LO(K, REG)	\
	(F3(2, 0x02) | IMMED | RS1(REG) | ((K) & 0x3ff) | RD(REG))
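
/* Example: for K = 0xdeadbeef, emit_set_const() below produces
 *
 *	sethi	%hi(0xdeadbeef), REG	! REG = 0xdeadb800
 *	or	REG, %lo(0xdeadbeef), REG	! REG |= 0x2ef
 *
 * SETHI supplies the upper 22 bits, OR_LO the lower 10.
 */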

#define ADD		F3(2, 0x00)
#define AND		F3(2, 0x01)
#define ANDCC		F3(2, 0x11)
#define OR		F3(2, 0x02)
#define SUB		F3(2, 0x04)
#define SUBCC		F3(2, 0x14)
#define MUL		F3(2, 0x0a)	/* umul */
#define DIV		F3(2, 0x0e)	/* udiv */
#define SLL		F3(2, 0x25)
#define SRL		F3(2, 0x26)
#define JMPL		F3(2, 0x38)
#define CALL		F2(1, 0)
#define BR		F2(0, 0x01)
#define RD_Y		F3(2, 0x28)
#define WR_Y		F3(2, 0x30)

#define LD32		F3(3, 0x00)
#define LD8		F3(3, 0x01)
#define LD16		F3(3, 0x02)
#define LD64		F3(3, 0x0b)
#define ST32		F3(3, 0x04)

#ifdef CONFIG_SPARC64
#define LDPTR		LD64
#define BASE_STACKFRAME	176
#else
#define LDPTR		LD32
#define BASE_STACKFRAME	96
#endif

#define LD32I		(LD32 | IMMED)
#define LD8I		(LD8 | IMMED)
#define LD16I		(LD16 | IMMED)
#define LD64I		(LD64 | IMMED)
#define LDPTRI		(LDPTR | IMMED)
#define ST32I		(ST32 | IMMED)

#define emit_nop()		\
do {				\
	*prog++ = SETHI(0, G0);	\
} while (0)

#define emit_neg()					\
do {	/* sub %g0, r_A, r_A */				\
	*prog++ = SUB | RS1(G0) | RS2(r_A) | RD(r_A);	\
} while (0)

#define emit_reg_move(FROM, TO)				\
do {	/* or %g0, FROM, TO */				\
	*prog++ = OR | RS1(G0) | RS2(FROM) | RD(TO);	\
} while (0)

#define emit_clear(REG)					\
do {	/* or %g0, %g0, REG */				\
	*prog++ = OR | RS1(G0) | RS2(G0) | RD(REG);	\
} while (0)

#define emit_set_const(K, REG)		\
do {	/* sethi %hi(K), REG */		\
	*prog++ = SETHI(K, REG);	\
	/* or REG, %lo(K), REG */	\
	*prog++ = OR_LO(K, REG);	\
} while (0)

/* Emit
 *
 *	OP	r_A, r_X, r_A
 */
#define emit_alu_X(OPCODE)					\
do {	seen |= SEEN_XREG;					\
	*prog++ = OPCODE | RS1(r_A) | RS2(r_X) | RD(r_A);	\
} while (0)

/* Emit either:
 *
 *	OP	r_A, K, r_A
 *
 * or
 *
 *	sethi	%hi(K), r_TMP
 *	or	r_TMP, %lo(K), r_TMP
 *	OP	r_A, r_TMP, r_A
 *
 * depending upon whether K fits in a signed 13-bit
 * immediate instruction field.  Emit nothing if K
 * is zero.
 */
#define emit_alu_K(OPCODE, K)					\
do {								\
	if (K) {						\
		unsigned int _insn = OPCODE;			\
		_insn |= RS1(r_A) | RD(r_A);			\
		if (is_simm13(K)) {				\
			*prog++ = _insn | IMMED | S13(K);	\
		} else {					\
			emit_set_const(K, r_TMP);		\
			*prog++ = _insn | RS2(r_TMP);		\
		}						\
	}							\
} while (0)
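
/* For example, emit_alu_K(ADD, 4) yields the single instruction
 * "add r_A, 4, r_A", whereas emit_alu_K(ADD, 0x12345678) yields the
 * three-instruction sethi/or/add sequence through r_TMP.
 */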

#define emit_loadimm(K, DEST)						\
do {									\
	if (is_simm13(K)) {						\
		/* or %g0, K, DEST */					\
		*prog++ = OR | IMMED | RS1(G0) | S13(K) | RD(DEST);	\
	} else {							\
		emit_set_const(K, DEST);				\
	}								\
} while (0)

#define emit_loadptr(BASE, STRUCT, FIELD, DEST)				\
do {	unsigned int _off = offsetof(STRUCT, FIELD);			\
	BUILD_BUG_ON(FIELD_SIZEOF(STRUCT, FIELD) != sizeof(void *));	\
	*prog++ = LDPTRI | RS1(BASE) | S13(_off) | RD(DEST);		\
} while (0)

#define emit_load32(BASE, STRUCT, FIELD, DEST)				\
do {	unsigned int _off = offsetof(STRUCT, FIELD);			\
	BUILD_BUG_ON(FIELD_SIZEOF(STRUCT, FIELD) != sizeof(u32));	\
	*prog++ = LD32I | RS1(BASE) | S13(_off) | RD(DEST);		\
} while (0)

#define emit_load16(BASE, STRUCT, FIELD, DEST)				\
do {	unsigned int _off = offsetof(STRUCT, FIELD);			\
	BUILD_BUG_ON(FIELD_SIZEOF(STRUCT, FIELD) != sizeof(u16));	\
	*prog++ = LD16I | RS1(BASE) | S13(_off) | RD(DEST);		\
} while (0)

#define __emit_load8(BASE, STRUCT, FIELD, DEST)			\
do {	unsigned int _off = offsetof(STRUCT, FIELD);		\
	*prog++ = LD8I | RS1(BASE) | S13(_off) | RD(DEST);	\
} while (0)

#define emit_load8(BASE, STRUCT, FIELD, DEST)				\
do {	BUILD_BUG_ON(FIELD_SIZEOF(STRUCT, FIELD) != sizeof(u8));	\
	__emit_load8(BASE, STRUCT, FIELD, DEST);			\
} while (0)

#define emit_ldmem(OFF, DEST)					\
do {	*prog++ = LD32I | RS1(FP) | S13(-(OFF)) | RD(DEST);	\
} while (0)

/* For stores the RD field names the register being stored, and the
 * opcode must be ST32I rather than LD32I, or this would emit a load
 * that clobbers SRC instead of a store.
 */
#define emit_stmem(OFF, SRC)					\
do {	*prog++ = ST32I | RS1(FP) | S13(-(OFF)) | RD(SRC);	\
} while (0)

#define cpu_off offsetof(struct thread_info, cpu)

#ifdef CONFIG_SMP
#ifdef CONFIG_SPARC64
#define emit_load_cpu(REG)				\
	emit_load16(G6, struct thread_info, cpu, REG)
#else
#define emit_load_cpu(REG)				\
	emit_load32(G6, struct thread_info, cpu, REG)
#endif
#else
#define emit_load_cpu(REG)	emit_clear(REG)
#endif

#define emit_skb_loadptr(FIELD, DEST)	\
	emit_loadptr(r_SKB, struct sk_buff, FIELD, DEST)
#define emit_skb_load32(FIELD, DEST)	\
	emit_load32(r_SKB, struct sk_buff, FIELD, DEST)
#define emit_skb_load16(FIELD, DEST)	\
	emit_load16(r_SKB, struct sk_buff, FIELD, DEST)
#define __emit_skb_load8(FIELD, DEST)	\
	__emit_load8(r_SKB, struct sk_buff, FIELD, DEST)
#define emit_skb_load8(FIELD, DEST)	\
	emit_load8(r_SKB, struct sk_buff, FIELD, DEST)

#define emit_jmpl(BASE, IMM_OFF, LREG) \
	*prog++ = (JMPL | IMMED | RS1(BASE) | S13(IMM_OFF) | RD(LREG))

#define emit_call(FUNC)					\
do {	void *_here = image + addrs[i] - 8;		\
	unsigned int _off = (void *)(FUNC) - _here;	\
	*prog++ = CALL | (((_off) >> 2) & 0x3fffffff);	\
	emit_nop();					\
} while (0)

#define emit_branch(BR_OPC, DEST)			\
do {	unsigned int _here = addrs[i] - 8;		\
	*prog++ = BR_OPC | WDISP22((DEST) - _here);	\
} while (0)

#define emit_branch_off(BR_OPC, OFF)		\
do {	*prog++ = BR_OPC | WDISP22(OFF);	\
} while (0)

#define emit_jump(DEST)		emit_branch(BA, DEST)

#define emit_read_y(REG)	*prog++ = RD_Y | RD(REG)
#define emit_write_y(REG)	*prog++ = WR_Y | IMMED | RS1(REG) | S13(0)

#define emit_cmp(R1, R2)	\
	*prog++ = (SUBCC | RS1(R1) | RS2(R2) | RD(G0))

#define emit_cmpi(R1, IMM)	\
	*prog++ = (SUBCC | IMMED | RS1(R1) | S13(IMM) | RD(G0))

#define emit_btst(R1, R2)	\
	*prog++ = (ANDCC | RS1(R1) | RS2(R2) | RD(G0))

#define emit_btsti(R1, IMM)	\
	*prog++ = (ANDCC | IMMED | RS1(R1) | S13(IMM) | RD(G0))

#define emit_sub(R1, R2, R3)	\
	*prog++ = (SUB | RS1(R1) | RS2(R2) | RD(R3))

#define emit_subi(R1, IMM, R3)	\
	*prog++ = (SUB | IMMED | RS1(R1) | S13(IMM) | RD(R3))

#define emit_add(R1, R2, R3)	\
	*prog++ = (ADD | RS1(R1) | RS2(R2) | RD(R3))

#define emit_addi(R1, IMM, R3)	\
	*prog++ = (ADD | IMMED | RS1(R1) | S13(IMM) | RD(R3))

#define emit_alloc_stack(SZ)	\
	*prog++ = (SUB | IMMED | RS1(SP) | S13(SZ) | RD(SP))

#define emit_release_stack(SZ)	\
	*prog++ = (ADD | IMMED | RS1(SP) | S13(SZ) | RD(SP))

/* A note about branch offset calculations.  The addrs[] array,
 * indexed by BPF instruction, records the address after all the
 * sparc instructions emitted for that BPF instruction.
 *
 * The most common case is to emit a branch at the end of such
 * a code sequence.  So this would be two instructions, the
 * branch and its delay slot.
 *
 * Therefore by default the branch emitters calculate the branch
 * offset field as:
 *
 *	destination - (addrs[i] - 8)
 *
 * This "addrs[i] - 8" is the address of the branch itself or
 * what "." would be in assembler notation.  The "8" part is
 * how we take into consideration the branch and its delay
 * slot mentioned above.
 *
 * Sometimes we need to emit a branch earlier in the code
 * sequence.  And in these situations we adjust "destination"
 * to accommodate this difference.  For example, if we needed
 * to emit a branch (and its delay slot) right before the
 * final instruction emitted for a BPF opcode, we'd use
 * "destination + 4" instead of just plain "destination" above.
 *
 * This is why you see all of these funny emit_branch() and
 * emit_jump() calls with adjusted offsets.
 */
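
/* Worked example: suppose BPF instruction i's code ends at byte
 * offset 40 (addrs[i] == 40) and must branch to offset 16.  The
 * branch instruction sits at 40 - 8 == 32 with its delay slot at 36,
 * so the 22-bit displacement field holds (16 - 32) >> 2 == -4 words,
 * i.e. "destination - ." exactly as in assembler.
 */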

void bpf_jit_compile(struct sk_filter *fp)
{
	unsigned int cleanup_addr, proglen, oldproglen = 0;
	u32 temp[8], *prog, *func, seen = 0, pass;
	const struct sock_filter *filter = fp->insns;
	int i, flen = fp->len, pc_ret0 = -1;
	unsigned int *addrs;
	u8 *image = NULL;

	if (!bpf_jit_enable)
		return;

	addrs = kmalloc(flen * sizeof(*addrs), GFP_KERNEL);
	if (addrs == NULL)
		return;

	/* Before the first pass, make a rough estimate of addrs[]:
	 * each BPF instruction is translated to less than 64 bytes.
	 */
	for (proglen = 0, i = 0; i < flen; i++) {
		proglen += 64;
		addrs[i] = proglen;
	}
	cleanup_addr = proglen; /* epilogue address */
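
	/* Each pass re-emits the whole program using the addrs[] from
	 * the previous pass for branch targets, so the estimates above
	 * only shrink; once proglen stops changing the image is
	 * allocated and one more pass writes the final code into it.
	 */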
	for (pass = 0; pass < 10; pass++) {
		u8 seen_or_pass0 = (pass == 0) ? (SEEN_XREG | SEEN_DATAREF | SEEN_MEM) : seen;

		/* no prologue/epilogue for trivial filters (RET something) */
		proglen = 0;
		prog = temp;

		if (seen_or_pass0) {
			if (seen_or_pass0 & SEEN_MEM) {
				unsigned int sz = BASE_STACKFRAME;
				sz += BPF_MEMWORDS * sizeof(u32);
				emit_alloc_stack(sz);
			}

			/* Make sure we don't leak kernel memory. */
			if (seen_or_pass0 & SEEN_XREG)
				emit_clear(r_X);

			/* If this filter needs to access skb data,
			 * load %o4 and %o5 with:
			 *	 %o4 = skb->len - skb->data_len
			 *	 %o5 = skb->data
			 * And also back up %o7 into r_saved_O7 so we can
			 * invoke the stubs using 'call'.
			 */
			if (seen_or_pass0 & SEEN_DATAREF) {
				emit_load32(r_SKB, struct sk_buff, len, r_HEADLEN);
				emit_load32(r_SKB, struct sk_buff, data_len, r_TMP);
				emit_sub(r_HEADLEN, r_TMP, r_HEADLEN);
				emit_loadptr(r_SKB, struct sk_buff, data, r_SKB_DATA);
			}
		}
		emit_reg_move(O7, r_saved_O7);

		switch (filter[0].code) {
		case BPF_S_RET_K:
		case BPF_S_LD_W_LEN:
		case BPF_S_ANC_PROTOCOL:
		case BPF_S_ANC_PKTTYPE:
		case BPF_S_ANC_IFINDEX:
		case BPF_S_ANC_MARK:
		case BPF_S_ANC_RXHASH:
		case BPF_S_ANC_CPU:
		case BPF_S_ANC_QUEUE:
		case BPF_S_LD_W_ABS:
		case BPF_S_LD_H_ABS:
		case BPF_S_LD_B_ABS:
			/* The first instruction sets the A register (or is
			 * a "RET 'constant'")
			 */
			break;
		default:
			/* Make sure we don't leak kernel information to the
			 * user.
			 */
			emit_clear(r_A); /* A = 0 */
		}

		for (i = 0; i < flen; i++) {
			unsigned int K = filter[i].k;
			unsigned int t_offset;
			unsigned int f_offset;
			u32 t_op, f_op;
			int ilen;

			switch (filter[i].code) {
			case BPF_S_ALU_ADD_X:	/* A += X; */
				emit_alu_X(ADD);
				break;
			case BPF_S_ALU_ADD_K:	/* A += K; */
				emit_alu_K(ADD, K);
				break;
			case BPF_S_ALU_SUB_X:	/* A -= X; */
				emit_alu_X(SUB);
				break;
			case BPF_S_ALU_SUB_K:	/* A -= K */
				emit_alu_K(SUB, K);
				break;
			case BPF_S_ALU_AND_X:	/* A &= X */
				emit_alu_X(AND);
				break;
			case BPF_S_ALU_AND_K:	/* A &= K */
				emit_alu_K(AND, K);
				break;
			case BPF_S_ALU_OR_X:	/* A |= X */
				emit_alu_X(OR);
				break;
			case BPF_S_ALU_OR_K:	/* A |= K */
				emit_alu_K(OR, K);
				break;
			case BPF_S_ALU_LSH_X:	/* A <<= X */
				emit_alu_X(SLL);
				break;
			case BPF_S_ALU_LSH_K:	/* A <<= K */
				emit_alu_K(SLL, K);
				break;
			case BPF_S_ALU_RSH_X:	/* A >>= X */
				emit_alu_X(SRL);
				break;
			case BPF_S_ALU_RSH_K:	/* A >>= K */
				emit_alu_K(SRL, K);
				break;
			case BPF_S_ALU_MUL_X:	/* A *= X; */
				emit_alu_X(MUL);
				break;
			case BPF_S_ALU_MUL_K:	/* A *= K */
				emit_alu_K(MUL, K);
				break;
			case BPF_S_ALU_DIV_K:	/* A /= K */
				/* sk_chk_filter() has already converted K
				 * to a reciprocal, so the divide becomes a
				 * umul whose high 32 bits are read back
				 * from the %y register.
				 */
				emit_alu_K(MUL, K);
				emit_read_y(r_A);
				break;
			case BPF_S_ALU_DIV_X:	/* A /= X; */
				emit_cmpi(r_X, 0);
				if (pc_ret0 > 0) {
					t_offset = addrs[pc_ret0 - 1];
#ifdef CONFIG_SPARC32
					emit_branch(BE, t_offset + 20);
#else
					emit_branch(BE, t_offset + 8);
#endif
					emit_nop(); /* delay slot */
				} else {
					emit_branch_off(BNE, 16);
					emit_nop(); /* delay slot */
#ifdef CONFIG_SPARC32
					emit_jump(cleanup_addr + 20);
#else
					emit_jump(cleanup_addr + 8);
#endif
					emit_nop();
				}
				emit_write_y(G0);
#ifdef CONFIG_SPARC32
				/* The Sparc v8 architecture requires
				 * three instructions between a %y
				 * register write and the first use.
				 */
				emit_nop();
				emit_nop();
				emit_nop();
#endif
				emit_alu_X(DIV);
				break;
			case BPF_S_ALU_NEG:
				emit_neg();
				break;
			case BPF_S_RET_K:
				if (!K) {
					if (pc_ret0 == -1)
						pc_ret0 = i;
					emit_clear(r_A);
				} else {
					emit_loadimm(K, r_A);
				}
				/* Fallthrough */
			case BPF_S_RET_A:
				if (seen_or_pass0) {
					if (i != flen - 1) {
						emit_jump(cleanup_addr);
						emit_nop();
						break;
					}
					if (seen_or_pass0 & SEEN_MEM) {
						unsigned int sz = BASE_STACKFRAME;
						sz += BPF_MEMWORDS * sizeof(u32);
						emit_release_stack(sz);
					}
				}
				/* jmpl %r_saved_O7 + 8, %g0 */
				emit_jmpl(r_saved_O7, 8, G0);
				emit_reg_move(r_A, O0); /* delay slot */
				break;
			case BPF_S_MISC_TAX:	/* X = A */
				seen |= SEEN_XREG;
				emit_reg_move(r_A, r_X);
				break;
			case BPF_S_MISC_TXA:	/* A = X */
				seen |= SEEN_XREG;
				emit_reg_move(r_X, r_A);
				break;
			case BPF_S_ANC_CPU:
				emit_load_cpu(r_A);
				break;
			case BPF_S_ANC_PROTOCOL:
				emit_skb_load16(protocol, r_A);
				break;
#if 0
				/* GCC won't let us take the address of
				 * a bit field even though we very much
				 * know what we are doing here.
				 */
			case BPF_S_ANC_PKTTYPE:
				__emit_skb_load8(pkt_type, r_A);
				emit_alu_K(SRL, 5);
				break;
#endif
			case BPF_S_ANC_IFINDEX:
				emit_skb_loadptr(dev, r_A);
				emit_cmpi(r_A, 0);
				emit_branch(BNE_PTR, cleanup_addr + 4);
				emit_nop();
				emit_load32(r_A, struct net_device, ifindex, r_A);
				break;
			case BPF_S_ANC_MARK:
				emit_skb_load32(mark, r_A);
				break;
			case BPF_S_ANC_QUEUE:
				emit_skb_load16(queue_mapping, r_A);
				break;
			case BPF_S_ANC_HATYPE:
				emit_skb_loadptr(dev, r_A);
				emit_cmpi(r_A, 0);
				emit_branch(BNE_PTR, cleanup_addr + 4);
				emit_nop();
				emit_load16(r_A, struct net_device, type, r_A);
				break;
			case BPF_S_ANC_RXHASH:
				emit_skb_load32(rxhash, r_A);
				break;

			case BPF_S_LD_IMM:
				emit_loadimm(K, r_A);
				break;
			case BPF_S_LDX_IMM:
				emit_loadimm(K, r_X);
				break;
			case BPF_S_LD_MEM:
				emit_ldmem(K * 4, r_A);
				break;
			case BPF_S_LDX_MEM:
				emit_ldmem(K * 4, r_X);
				break;
			case BPF_S_ST:
				emit_stmem(K * 4, r_A);
				break;
			case BPF_S_STX:
				emit_stmem(K * 4, r_X);
				break;

#define CHOOSE_LOAD_FUNC(K, func) \
	((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative_offset : func) : func##_positive_offset)
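
/* The helper stubs come in three families: K known to be non-negative
 * selects the positive-offset stub, K in the negative SKF_*_OFF range
 * selects the negative-offset stub used for the ancillary/link-layer
 * offsets, and anything else (including the indirect loads below,
 * whose offset is only known at run time) goes through the generic
 * stub.  For example, K == SKF_NET_OFF (-0x100000) is >= SKF_LL_OFF
 * and so picks bpf_jit_load_word_negative_offset for a word load.
 */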

			case BPF_S_LD_W_ABS:
				func = CHOOSE_LOAD_FUNC(K, bpf_jit_load_word);
common_load:			seen |= SEEN_DATAREF;
				emit_loadimm(K, r_OFF);
				emit_call(func);
				break;
			case BPF_S_LD_H_ABS:
				func = CHOOSE_LOAD_FUNC(K, bpf_jit_load_half);
				goto common_load;
			case BPF_S_LD_B_ABS:
				func = CHOOSE_LOAD_FUNC(K, bpf_jit_load_byte);
				goto common_load;
			case BPF_S_LDX_B_MSH:
				func = CHOOSE_LOAD_FUNC(K, bpf_jit_load_byte_msh);
				goto common_load;
			case BPF_S_LD_W_IND:
				func = bpf_jit_load_word;
common_load_ind:		seen |= SEEN_DATAREF | SEEN_XREG;
				if (K) {
					if (is_simm13(K)) {
						emit_addi(r_X, K, r_OFF);
					} else {
						emit_loadimm(K, r_TMP);
						emit_add(r_X, r_TMP, r_OFF);
					}
				} else {
					emit_reg_move(r_X, r_OFF);
				}
				emit_call(func);
				break;
			case BPF_S_LD_H_IND:
				func = bpf_jit_load_half;
				goto common_load_ind;
			case BPF_S_LD_B_IND:
				func = bpf_jit_load_byte;
				goto common_load_ind;
			case BPF_S_JMP_JA:
				emit_jump(addrs[i + K]);
				emit_nop();
				break;

#define COND_SEL(CODE, TOP, FOP)	\
	case CODE:			\
		t_op = TOP;		\
		f_op = FOP;		\
		goto cond_branch

			COND_SEL(BPF_S_JMP_JGT_K, BGU, BLEU);
			COND_SEL(BPF_S_JMP_JGE_K, BGEU, BLU);
			COND_SEL(BPF_S_JMP_JEQ_K, BE, BNE);
			COND_SEL(BPF_S_JMP_JSET_K, BNE, BE);
			COND_SEL(BPF_S_JMP_JGT_X, BGU, BLEU);
			COND_SEL(BPF_S_JMP_JGE_X, BGEU, BLU);
			COND_SEL(BPF_S_JMP_JEQ_X, BE, BNE);
			COND_SEL(BPF_S_JMP_JSET_X, BNE, BE);

cond_branch:		f_offset = addrs[i + filter[i].jf];
			t_offset = addrs[i + filter[i].jt];

			/* same targets, can avoid doing the test :) */
			if (filter[i].jt == filter[i].jf) {
				emit_jump(t_offset);
				emit_nop();
				break;
			}

			switch (filter[i].code) {
			case BPF_S_JMP_JGT_X:
			case BPF_S_JMP_JGE_X:
			case BPF_S_JMP_JEQ_X:
				seen |= SEEN_XREG;
				emit_cmp(r_A, r_X);
				break;
			case BPF_S_JMP_JSET_X:
				seen |= SEEN_XREG;
				emit_btst(r_A, r_X);
				break;
			case BPF_S_JMP_JEQ_K:
			case BPF_S_JMP_JGT_K:
			case BPF_S_JMP_JGE_K:
				if (is_simm13(K)) {
					emit_cmpi(r_A, K);
				} else {
					emit_loadimm(K, r_TMP);
					emit_cmp(r_A, r_TMP);
				}
				break;
			case BPF_S_JMP_JSET_K:
				if (is_simm13(K)) {
					emit_btsti(r_A, K);
				} else {
					emit_loadimm(K, r_TMP);
					emit_btst(r_A, r_TMP);
				}
				break;
			}
			if (filter[i].jt != 0) {
				if (filter[i].jf)
					t_offset += 8;
				emit_branch(t_op, t_offset);
				emit_nop(); /* delay slot */
				if (filter[i].jf) {
					emit_branch(f_op, f_offset);
					emit_nop(); /* delay slot */
				}
				break;
			}
			emit_branch(f_op, f_offset);
			emit_nop(); /* delay slot */
			break;
			default:
				/* hmm, too complex filter, give up with jit compiler */
				goto out;
			}
			ilen = (void *) prog - (void *) temp;
			if (image) {
				if (unlikely(proglen + ilen > oldproglen)) {
					pr_err("bpf_jit_compile fatal error\n");
					kfree(addrs);
					module_free(NULL, image);
					return;
				}
				memcpy(image + proglen, temp, ilen);
			}
			proglen += ilen;
			addrs[i] = proglen;
			prog = temp;
		}

		/* last bpf instruction is always a RET :
		 * use it to give the cleanup instruction(s) addr
		 */
		cleanup_addr = proglen - 8; /* jmpl; mov r_A, %o0; */
		if (seen_or_pass0 & SEEN_MEM)
			cleanup_addr -= 4; /* add %sp, X, %sp; */

		if (image) {
			if (proglen != oldproglen)
				pr_err("bpf_jit_compile proglen=%u != oldproglen=%u\n",
				       proglen, oldproglen);
			break;
		}
		if (proglen == oldproglen) {
			image = module_alloc(max_t(unsigned int,
						   proglen,
						   sizeof(struct work_struct)));
			if (!image)
				goto out;
		}
		oldproglen = proglen;
	}

	if (bpf_jit_enable > 1)
		pr_err("flen=%d proglen=%u pass=%d image=%p\n",
		       flen, proglen, pass, image);

	if (image) {
		if (bpf_jit_enable > 1)
			print_hex_dump(KERN_ERR, "JIT code: ", DUMP_PREFIX_ADDRESS,
				       16, 1, image, proglen, false);
		bpf_flush_icache(image, image + proglen);
		fp->bpf_func = (void *)image;
	}
out:
	kfree(addrs);
	return;
}

static void jit_free_defer(struct work_struct *arg)
{
	module_free(NULL, arg);
}

/* run from softirq, we must use a work_struct to call
 * module_free() from process context
 */
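/* The work_struct is overlaid on the start of the defunct JIT image
 * itself: bpf_jit_compile() sized the module_alloc() area as
 * max(proglen, sizeof(struct work_struct)), so no separate allocation
 * is needed on the free path.
 */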
void bpf_jit_free(struct sk_filter *fp)
{
	if (fp->bpf_func != sk_run_filter) {
		struct work_struct *work = (struct work_struct *)fp->bpf_func;

		INIT_WORK(work, jit_free_defer);
		schedule_work(work);
	}
}