/* bpf_jit_comp.c: BPF JIT compiler for PPC64
 *
 * Copyright 2011 Matt Evans <matt@ozlabs.org>, IBM Corporation
 *
 * Based on the x86 BPF compiler, by Eric Dumazet (eric.dumazet@gmail.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */
#include <linux/moduleloader.h>
#include <asm/cacheflush.h>
#include <linux/netdevice.h>
#include <linux/filter.h>
#include "bpf_jit.h"

#ifndef __BIG_ENDIAN
/* There are endianness assumptions herein. */
#error "Little-endian PPC not supported in BPF compiler"
#endif

int bpf_jit_enable __read_mostly;
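
/*
 * This flag is exposed as the net.core.bpf_jit_enable sysctl: 0 leaves
 * filters to the interpreter, non-zero enables the JIT, and values greater
 * than 1 additionally enable the per-pass debug output and the hex dump of
 * the generated image in bpf_jit_compile() below.
 */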
static inline void bpf_flush_icache(void *start, void *end)
{
	smp_wmb();
	flush_icache_range((unsigned long)start, (unsigned long)end);
}

static void bpf_jit_build_prologue(struct sk_filter *fp, u32 *image,
				   struct codegen_context *ctx)
{
	int i;
	const struct sock_filter *filter = fp->insns;

	if (ctx->seen & (SEEN_MEM | SEEN_DATAREF)) {
		/* Make stackframe */
		if (ctx->seen & SEEN_DATAREF) {
			/* If we call any helpers (for loads), save LR */
			EMIT(PPC_INST_MFLR | __PPC_RT(R0));
			PPC_STD(0, 1, 16);

			/* Back up non-volatile regs. */
			PPC_STD(r_D, 1, -(8*(32-r_D)));
			PPC_STD(r_HL, 1, -(8*(32-r_HL)));
		}
		if (ctx->seen & SEEN_MEM) {
			/*
			 * Conditionally save regs r15-r31 as some will be used
			 * for M[] data.
			 */
			for (i = r_M; i < (r_M+16); i++) {
				if (ctx->seen & (1 << (i-r_M)))
					PPC_STD(i, 1, -(8*(32-i)));
			}
		}
		EMIT(PPC_INST_STDU | __PPC_RS(R1) | __PPC_RA(R1) |
		     (-BPF_PPC_STACKFRAME & 0xfffc));
	}
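
	/*
	 * A stack frame and the register saves above are only needed when the
	 * filter uses M[] scratch space or calls the external load helpers;
	 * filters that do neither run frameless in volatile registers.
	 */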

	if (ctx->seen & SEEN_DATAREF) {
		/*
		 * If this filter needs to access skb data,
		 * prepare r_D and r_HL:
		 *  r_HL = skb->len - skb->data_len
		 *  r_D  = skb->data
		 */
		PPC_LWZ_OFFS(r_scratch1, r_skb, offsetof(struct sk_buff,
							 data_len));
		PPC_LWZ_OFFS(r_HL, r_skb, offsetof(struct sk_buff, len));
		PPC_SUB(r_HL, r_HL, r_scratch1);
		PPC_LD_OFFS(r_D, r_skb, offsetof(struct sk_buff, data));
	}

	if (ctx->seen & SEEN_XREG) {
		/*
		 * TODO: Could also detect whether first instr. sets X and
		 * avoid this (as below, with A).
		 */
		PPC_LI(r_X, 0);
	}
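
	/*
	 * The switch below only inspects the first filter instruction: when it
	 * already defines A (or returns a constant), the zeroing of A in the
	 * default branch can be skipped.
	 */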

	switch (filter[0].code) {
	case BPF_S_RET_K:
	case BPF_S_LD_W_LEN:
	case BPF_S_ANC_PROTOCOL:
	case BPF_S_ANC_IFINDEX:
	case BPF_S_ANC_MARK:
	case BPF_S_ANC_RXHASH:
	case BPF_S_ANC_CPU:
	case BPF_S_ANC_QUEUE:
	case BPF_S_LD_W_ABS:
	case BPF_S_LD_H_ABS:
	case BPF_S_LD_B_ABS:
		/* first instruction sets A register (or is RET 'constant') */
		break;
	default:
		/* make sure we don't leak kernel information to user */
		PPC_LI(r_A, 0);
	}
}

static void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx)
{
	int i;

	if (ctx->seen & (SEEN_MEM | SEEN_DATAREF)) {
		PPC_ADDI(1, 1, BPF_PPC_STACKFRAME);
		if (ctx->seen & SEEN_DATAREF) {
			PPC_LD(0, 1, 16);
			PPC_MTLR(0);
			PPC_LD(r_D, 1, -(8*(32-r_D)));
			PPC_LD(r_HL, 1, -(8*(32-r_HL)));
		}
		if (ctx->seen & SEEN_MEM) {
			/* Restore any saved non-vol registers */
			for (i = r_M; i < (r_M+16); i++) {
				if (ctx->seen & (1 << (i-r_M)))
					PPC_LD(i, 1, -(8*(32-i)));
			}
		}
	}
	/* The RETs have left a return value in R3. */

	PPC_BLR();
}
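
/*
 * Select a load-helper variant from the constant offset K: non-negative
 * offsets use the _positive_offset helper, negative offsets down to
 * SKF_LL_OFF use the _negative_offset helper, and anything more negative
 * falls back to the generic sk_load_* helper.
 */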
#define CHOOSE_LOAD_FUNC(K, func) \
	((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative_offset : func) : func##_positive_offset)

/* Assemble the body code between the prologue & epilogue. */
static int bpf_jit_build_body(struct sk_filter *fp, u32 *image,
			      struct codegen_context *ctx,
			      unsigned int *addrs)
{
	const struct sock_filter *filter = fp->insns;
	int flen = fp->len;
	u8 *func;
	unsigned int true_cond;
	int i;

	/* Start of epilogue code */
	unsigned int exit_addr = addrs[flen];

	for (i = 0; i < flen; i++) {
		unsigned int K = filter[i].k;

		/*
		 * addrs[] maps a BPF bytecode address into a real offset from
		 * the start of the body code.
		 */
		addrs[i] = ctx->idx * 4;

		switch (filter[i].code) {
			/*** ALU ops ***/
		case BPF_S_ALU_ADD_X: /* A += X; */
			ctx->seen |= SEEN_XREG;
			PPC_ADD(r_A, r_A, r_X);
			break;
		case BPF_S_ALU_ADD_K: /* A += K; */
			if (!K)
				break;
			PPC_ADDI(r_A, r_A, IMM_L(K));
			if (K >= 32768)
				PPC_ADDIS(r_A, r_A, IMM_HA(K));
			break;
		case BPF_S_ALU_SUB_X: /* A -= X; */
			ctx->seen |= SEEN_XREG;
			PPC_SUB(r_A, r_A, r_X);
			break;
		case BPF_S_ALU_SUB_K: /* A -= K */
			if (!K)
				break;
			PPC_ADDI(r_A, r_A, IMM_L(-K));
			if (K >= 32768)
				PPC_ADDIS(r_A, r_A, IMM_HA(-K));
			break;
		case BPF_S_ALU_MUL_X: /* A *= X; */
			ctx->seen |= SEEN_XREG;
			PPC_MUL(r_A, r_A, r_X);
			break;
		case BPF_S_ALU_MUL_K: /* A *= K */
			if (K < 32768)
				PPC_MULI(r_A, r_A, K);
			else {
				PPC_LI32(r_scratch1, K);
				PPC_MUL(r_A, r_A, r_scratch1);
			}
			break;
		case BPF_S_ALU_DIV_X: /* A /= X; */
			ctx->seen |= SEEN_XREG;
			PPC_CMPWI(r_X, 0);
			if (ctx->pc_ret0 != -1) {
				PPC_BCC(COND_EQ, addrs[ctx->pc_ret0]);
			} else {
				/*
				 * Exit, returning 0; first pass hits here
				 * (longer worst-case code size).
				 */
				PPC_BCC_SHORT(COND_NE, (ctx->idx*4)+12);
				PPC_LI(r_ret, 0);
				PPC_JMP(exit_addr);
			}
			PPC_DIVWU(r_A, r_A, r_X);
			break;
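
			/*
			 * For constant divides, the generic filter checker is
			 * expected to have already replaced K with a
			 * precomputed reciprocal, so the quotient is simply
			 * the high 32 bits of the multiply below.
			 */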
		case BPF_S_ALU_DIV_K: /* A = reciprocal_divide(A, K); */
			PPC_LI32(r_scratch1, K);
			/* Top 32 bits of 64bit result -> A */
			PPC_MULHWU(r_A, r_A, r_scratch1);
			break;
		case BPF_S_ALU_AND_X:
			ctx->seen |= SEEN_XREG;
			PPC_AND(r_A, r_A, r_X);
			break;
		case BPF_S_ALU_AND_K:
			if (!IMM_H(K))
				PPC_ANDI(r_A, r_A, K);
			else {
				PPC_LI32(r_scratch1, K);
				PPC_AND(r_A, r_A, r_scratch1);
			}
			break;
		case BPF_S_ALU_OR_X:
			ctx->seen |= SEEN_XREG;
			PPC_OR(r_A, r_A, r_X);
			break;
		case BPF_S_ALU_OR_K:
			if (IMM_L(K))
				PPC_ORI(r_A, r_A, IMM_L(K));
			if (K >= 65536)
				PPC_ORIS(r_A, r_A, IMM_H(K));
			break;
		case BPF_S_ALU_LSH_X: /* A <<= X; */
			ctx->seen |= SEEN_XREG;
			PPC_SLW(r_A, r_A, r_X);
			break;
		case BPF_S_ALU_LSH_K:
			if (K == 0)
				break;
			else
				PPC_SLWI(r_A, r_A, K);
			break;
		case BPF_S_ALU_RSH_X: /* A >>= X; */
			ctx->seen |= SEEN_XREG;
			PPC_SRW(r_A, r_A, r_X);
			break;
		case BPF_S_ALU_RSH_K: /* A >>= K; */
			if (K == 0)
				break;
			else
				PPC_SRWI(r_A, r_A, K);
			break;
		case BPF_S_RET_K:
			PPC_LI32(r_ret, K);
			if (!K) {
				if (ctx->pc_ret0 == -1)
					ctx->pc_ret0 = i;
			}
			/*
			 * If this isn't the very last instruction, branch to
			 * the epilogue if we've stuff to clean up. Otherwise,
			 * if there's nothing to tidy, just return. If we /are/
			 * the last instruction, we're about to fall through to
			 * the epilogue to return.
			 */
			if (i != flen - 1) {
				/*
				 * Note: 'seen' is properly valid only on pass
				 * #2. Both parts of this conditional are the
				 * same instruction size though, meaning the
				 * first pass will still correctly determine
				 * the code size/addresses.
				 */
				if (ctx->seen)
					PPC_JMP(exit_addr);
				else
					PPC_BLR();
			}
			break;
		case BPF_S_MISC_TAX: /* X = A */
			PPC_MR(r_X, r_A);
			break;
		case BPF_S_MISC_TXA: /* A = X */
			ctx->seen |= SEEN_XREG;
			PPC_MR(r_A, r_X);
			break;
			/*** Constant loads/M[] access ***/
		case BPF_S_LD_IMM: /* A = K */
			PPC_LI32(r_A, K);
			break;
		case BPF_S_LDX_IMM: /* X = K */
			PPC_LI32(r_X, K);
			break;
		case BPF_S_LD_MEM: /* A = mem[K] */
			PPC_MR(r_A, r_M + (K & 0xf));
			ctx->seen |= SEEN_MEM | (1<<(K & 0xf));
			break;
		case BPF_S_LDX_MEM: /* X = mem[K] */
			PPC_MR(r_X, r_M + (K & 0xf));
			ctx->seen |= SEEN_MEM | (1<<(K & 0xf));
			break;
		case BPF_S_ST: /* mem[K] = A */
			PPC_MR(r_M + (K & 0xf), r_A);
			ctx->seen |= SEEN_MEM | (1<<(K & 0xf));
			break;
		case BPF_S_STX: /* mem[K] = X */
			PPC_MR(r_M + (K & 0xf), r_X);
			ctx->seen |= SEEN_XREG | SEEN_MEM | (1<<(K & 0xf));
			break;
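
			/*
			 * M[] is not in memory at all here: each of the 16
			 * scratch slots maps onto one non-volatile register
			 * starting at r_M, which is why the per-slot SEEN_MEM
			 * bits also drive the save/restore loops in the
			 * prologue and epilogue.
			 */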
		case BPF_S_LD_W_LEN: /* A = skb->len; */
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, len) != 4);
			PPC_LWZ_OFFS(r_A, r_skb, offsetof(struct sk_buff, len));
			break;
		case BPF_S_LDX_W_LEN: /* X = skb->len; */
			PPC_LWZ_OFFS(r_X, r_skb, offsetof(struct sk_buff, len));
			break;

			/*** Ancillary info loads ***/

			/* None of the BPF_S_ANC* codes appear to be passed by
			 * sk_chk_filter(). The interpreter and the x86 BPF
			 * compiler implement them so we do too -- they may be
			 * used by filters in the future.
			 */

		case BPF_S_ANC_PROTOCOL: /* A = ntohs(skb->protocol); */
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
						  protocol) != 2);
			PPC_LHZ_OFFS(r_A, r_skb, offsetof(struct sk_buff,
							  protocol));
			/* ntohs is a NOP with BE loads. */
			break;
		case BPF_S_ANC_IFINDEX:
			PPC_LD_OFFS(r_scratch1, r_skb, offsetof(struct sk_buff,
								dev));
			PPC_CMPDI(r_scratch1, 0);
			if (ctx->pc_ret0 != -1) {
				PPC_BCC(COND_EQ, addrs[ctx->pc_ret0]);
			} else {
				/* Exit, returning 0; first pass hits here. */
				PPC_BCC_SHORT(COND_NE, (ctx->idx*4)+12);
				PPC_LI(r_ret, 0);
				PPC_JMP(exit_addr);
			}
			BUILD_BUG_ON(FIELD_SIZEOF(struct net_device,
						  ifindex) != 4);
			PPC_LWZ_OFFS(r_A, r_scratch1,
				     offsetof(struct net_device, ifindex));
			break;
		case BPF_S_ANC_MARK:
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4);
			PPC_LWZ_OFFS(r_A, r_skb, offsetof(struct sk_buff,
							  mark));
			break;
		case BPF_S_ANC_RXHASH:
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, rxhash) != 4);
			PPC_LWZ_OFFS(r_A, r_skb, offsetof(struct sk_buff,
							  rxhash));
			break;
		case BPF_S_ANC_QUEUE:
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
						  queue_mapping) != 2);
			PPC_LHZ_OFFS(r_A, r_skb, offsetof(struct sk_buff,
							  queue_mapping));
			break;
		case BPF_S_ANC_CPU:
#ifdef CONFIG_SMP
			/*
			 * PACA ptr is r13:
			 * raw_smp_processor_id() = local_paca->paca_index
			 */
			BUILD_BUG_ON(FIELD_SIZEOF(struct paca_struct,
						  paca_index) != 2);
			PPC_LHZ_OFFS(r_A, 13,
				     offsetof(struct paca_struct, paca_index));
#else
			PPC_LI(r_A, 0);
#endif
			break;

			/*** Absolute loads from packet header/data ***/
		case BPF_S_LD_W_ABS:
			func = CHOOSE_LOAD_FUNC(K, sk_load_word);
			goto common_load;
		case BPF_S_LD_H_ABS:
			func = CHOOSE_LOAD_FUNC(K, sk_load_half);
			goto common_load;
		case BPF_S_LD_B_ABS:
			func = CHOOSE_LOAD_FUNC(K, sk_load_byte);
		common_load:
			/* Load from [K]. */
			ctx->seen |= SEEN_DATAREF;
			PPC_LI64(r_scratch1, func);
			PPC_MTLR(r_scratch1);
			PPC_LI32(r_addr, K);
			PPC_BLRL();
			/*
			 * Helper returns 'lt' condition on error, and an
			 * appropriate return value in r3
			 */
			PPC_BCC(COND_LT, exit_addr);
			break;
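
			/*
			 * The sk_load_* helpers are reached with the offset in
			 * r_addr; on success the loaded value is expected back
			 * in r_A, while on failure they flag 'lt' in cr0 and
			 * leave the filter's return value in r3, hence the
			 * conditional branch to the epilogue above.
			 */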

			/*** Indirect loads from packet header/data ***/
		case BPF_S_LD_W_IND:
			func = sk_load_word;
			goto common_load_ind;
		case BPF_S_LD_H_IND:
			func = sk_load_half;
			goto common_load_ind;
		case BPF_S_LD_B_IND:
			func = sk_load_byte;
		common_load_ind:
			/*
			 * Load from [X + K]. Negative offsets are tested for
			 * in the helper functions.
			 */
			ctx->seen |= SEEN_DATAREF | SEEN_XREG;
			PPC_LI64(r_scratch1, func);
			PPC_MTLR(r_scratch1);
			PPC_ADDI(r_addr, r_X, IMM_L(K));
			if (K >= 32768)
				PPC_ADDIS(r_addr, r_addr, IMM_HA(K));
			PPC_BLRL();
			/* If error, cr0.LT set */
			PPC_BCC(COND_LT, exit_addr);
			break;

		case BPF_S_LDX_B_MSH:
			func = CHOOSE_LOAD_FUNC(K, sk_load_byte_msh);
			goto common_load;

			/*** Jump and branches ***/
		case BPF_S_JMP_JA:
			if (K != 0)
				PPC_JMP(addrs[i + 1 + K]);
			break;

		case BPF_S_JMP_JGT_K:
		case BPF_S_JMP_JGT_X:
			true_cond = COND_GT;
			goto cond_branch;
		case BPF_S_JMP_JGE_K:
		case BPF_S_JMP_JGE_X:
			true_cond = COND_GE;
			goto cond_branch;
		case BPF_S_JMP_JEQ_K:
		case BPF_S_JMP_JEQ_X:
			true_cond = COND_EQ;
			goto cond_branch;
		case BPF_S_JMP_JSET_K:
		case BPF_S_JMP_JSET_X:
			true_cond = COND_NE;
			/* fall through */
		cond_branch:
			/* same targets, can avoid doing the test :) */
			if (filter[i].jt == filter[i].jf) {
				if (filter[i].jt > 0)
					PPC_JMP(addrs[i + 1 + filter[i].jt]);
				break;
			}

			switch (filter[i].code) {
			case BPF_S_JMP_JGT_X:
			case BPF_S_JMP_JGE_X:
			case BPF_S_JMP_JEQ_X:
				ctx->seen |= SEEN_XREG;
				PPC_CMPLW(r_A, r_X);
				break;
			case BPF_S_JMP_JSET_X:
				ctx->seen |= SEEN_XREG;
				PPC_AND_DOT(r_scratch1, r_A, r_X);
				break;
			case BPF_S_JMP_JEQ_K:
			case BPF_S_JMP_JGT_K:
			case BPF_S_JMP_JGE_K:
				if (K < 32768)
					PPC_CMPLWI(r_A, K);
				else {
					PPC_LI32(r_scratch1, K);
					PPC_CMPLW(r_A, r_scratch1);
				}
				break;
			case BPF_S_JMP_JSET_K:
				if (K < 32768)
					/* PPC_ANDI is /only/ dot-form */
					PPC_ANDI(r_scratch1, r_A, K);
				else {
					PPC_LI32(r_scratch1, K);
					PPC_AND_DOT(r_scratch1, r_A,
						    r_scratch1);
				}
				break;
			}
			/* Sometimes branches are constructed "backward", with
			 * the false path being the branch and true path being
			 * a fallthrough to the next instruction.
			 */
			if (filter[i].jt == 0)
				/* Swap the sense of the branch */
				PPC_BCC(true_cond ^ COND_CMP_TRUE,
					addrs[i + 1 + filter[i].jf]);
			else {
				PPC_BCC(true_cond, addrs[i + 1 + filter[i].jt]);
				if (filter[i].jf != 0)
					PPC_JMP(addrs[i + 1 + filter[i].jf]);
			}
			break;
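
			/*
			 * The conditional branches emitted here always occupy
			 * two instruction words whether the target ends up
			 * near or far (see the pass discussion in
			 * bpf_jit_compile() below), so the addrs[] offsets
			 * used as targets stay stable between passes.
			 */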
		default:
			/* The filter contains something cruel & unusual.
			 * We don't handle it, but also there shouldn't be
			 * anything missing from our list.
			 */
			if (printk_ratelimit())
				pr_err("BPF filter opcode %04x (@%d) unsupported\n",
				       filter[i].code, i);
			return -ENOTSUPP;
		}
	}
	/* Set end-of-body-code address for exit. */
	addrs[i] = ctx->idx * 4;

	return 0;
}

void bpf_jit_compile(struct sk_filter *fp)
{
	unsigned int proglen;
	unsigned int alloclen;
	u32 *image = NULL;
	u32 *code_base;
	unsigned int *addrs;
	struct codegen_context cgctx;
	int pass;
	int flen = fp->len;

	if (!bpf_jit_enable)
		return;

	addrs = kzalloc((flen+1) * sizeof(*addrs), GFP_KERNEL);
	if (addrs == NULL)
		return;

	/*
	 * There are multiple assembly passes as the generated code will change
	 * size as it settles down, figuring out the max branch offsets/exit
	 * paths required.
	 *
	 * The range of standard conditional branches is +/- 32Kbytes. Since
	 * BPF_MAXINSNS = 4096, we can only jump from (worst case) start to
	 * finish with 8 bytes/instruction. Not feasible, so long jumps are
	 * used, distinct from short branches.
	 *
	 * Current:
	 *
	 * For now, both branch types assemble to 2 words (short branches padded
	 * with a NOP); this is less efficient, but assembly will always complete
	 * after exactly 3 passes:
	 *
	 * First pass: No code buffer; Program is "faux-generated" -- no code
	 * emitted but maximum size of output determined (and addrs[] filled
	 * in). Also, we note whether we use M[], whether we use skb data, etc.
	 * All generation choices assumed to be 'worst-case', e.g. branches all
	 * far (2 instructions), return path code reduction not available, etc.
	 *
	 * Second pass: Code buffer allocated with size determined previously.
	 * Prologue generated to support features we have seen used. Exit paths
	 * determined and addrs[] is filled in again, as code may be slightly
	 * smaller as a result.
	 *
	 * Third pass: Code generated 'for real', and branch destinations
	 * determined from now-accurate addrs[] map.
	 *
	 * Ideal:
	 *
	 * If we optimise this, near branches will be shorter. On the
	 * first assembly pass, we should err on the side of caution and
	 * generate the biggest code. On subsequent passes, branches will be
	 * generated short or long and code size will reduce. With smaller
	 * code, more branches may fall into the short category, and code will
	 * reduce more.
	 *
	 * Finally, if we see one pass generate code the same size as the
	 * previous pass we have converged and should now generate code for
	 * real. Allocating at the end will also save the memory that would
	 * otherwise be wasted by the (small) current code shrinkage.
	 * Preferably, we should do a small number of passes (e.g. 5) and if we
	 * haven't converged by then, get impatient and force code to generate
	 * as-is, even if the odd branch would be left long. The chances of a
	 * long jump are tiny with all but the most enormous of BPF filter
	 * inputs, so we should usually converge on the third pass.
	 */
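
	/*
	 * pc_ret0 starts at -1, meaning "no 'return 0' instruction emitted
	 * yet"; once the body records one, later guards (the divide-by-zero
	 * and NULL skb->dev checks) branch to that existing code instead of
	 * emitting another return-0 sequence.
	 */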

	cgctx.idx = 0;
	cgctx.seen = 0;
	cgctx.pc_ret0 = -1;

	/* Scouting faux-generate pass 0 */
	if (bpf_jit_build_body(fp, 0, &cgctx, addrs))
		/* We hit something illegal or unsupported. */
		goto out;

	/*
	 * Pretend to build prologue, given the features we've seen. This will
	 * update cgctx.idx as it pretends to output instructions, then we can
	 * calculate total size from idx.
	 */
	bpf_jit_build_prologue(fp, 0, &cgctx);
	bpf_jit_build_epilogue(0, &cgctx);
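
	/*
	 * The allocation below leaves room for the ppc64 function descriptor
	 * (entry address + TOC pointer) ahead of the code, and is padded to at
	 * least sizeof(struct work_struct) because bpf_jit_free() reuses the
	 * image as a work_struct when deferring module_free().
	 */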

	proglen = cgctx.idx * 4;
	alloclen = proglen + FUNCTION_DESCR_SIZE;
	image = module_alloc(max_t(unsigned int, alloclen,
				   sizeof(struct work_struct)));
	if (!image)
		goto out;

	code_base = image + (FUNCTION_DESCR_SIZE/4);

	/* Code generation passes 1-2 */
	for (pass = 1; pass < 3; pass++) {
		/* Now build the prologue, body code & epilogue for real. */
		cgctx.idx = 0;
		bpf_jit_build_prologue(fp, code_base, &cgctx);
		bpf_jit_build_body(fp, code_base, &cgctx, addrs);
		bpf_jit_build_epilogue(code_base, &cgctx);

		if (bpf_jit_enable > 1)
			pr_info("Pass %d: shrink = %d, seen = 0x%x\n", pass,
				proglen - (cgctx.idx * 4), cgctx.seen);
	}

	if (bpf_jit_enable > 1)
		pr_info("flen=%d proglen=%u pass=%d image=%p\n",
			flen, proglen, pass, image);

	if (bpf_jit_enable > 1)
		print_hex_dump(KERN_ERR, "JIT code: ",
			       DUMP_PREFIX_ADDRESS, 16, 1, code_base,
			       proglen, false);

	bpf_flush_icache(code_base, code_base + (proglen/4));
	/* Function descriptor nastiness: Address + TOC */
	((u64 *)image)[0] = (u64)code_base;
	((u64 *)image)[1] = local_paca->kernel_toc;
	fp->bpf_func = (void *)image;

out:
	kfree(addrs);
}

static void jit_free_defer(struct work_struct *arg)
{
	module_free(NULL, arg);
}

/* run from softirq, we must use a work_struct to call
 * module_free() from process context
 */
void bpf_jit_free(struct sk_filter *fp)
{
	if (fp->bpf_func != sk_run_filter) {
		struct work_struct *work = (struct work_struct *)fp->bpf_func;

		INIT_WORK(work, jit_free_defer);
		schedule_work(work);
	}
}