/*
 * bpf-prologue.c
 *
 * Copyright (C) 2015 He Kuang <hekuang@huawei.com>
 * Copyright (C) 2015 Wang Nan <wangnan0@huawei.com>
 * Copyright (C) 2015 Huawei Inc.
 */
#include <bpf/libbpf.h>
#include "perf.h"
#include "debug.h"
#include "bpf-loader.h"
#include "bpf-prologue.h"
#include "probe-finder.h"
#include <errno.h>
#include <dwarf-regs.h>
#include <linux/filter.h>
#define BPF_REG_SIZE 8

#define JMP_TO_ERROR_CODE	-1
#define JMP_TO_SUCCESS_CODE	-2
#define JMP_TO_USER_CODE	-3
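
/*
 * Jump instructions emitted while generating the prologue use the
 * JMP_TO_* values above as placeholder target offsets;
 * prologue_relocate() later rewrites them into real pc-relative
 * offsets once the positions of the error/success/user code are known.
 */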
struct bpf_insn_pos {
	struct bpf_insn *begin;
	struct bpf_insn *end;
	struct bpf_insn *pos;
};
static inline int
pos_get_cnt(struct bpf_insn_pos *pos)
{
	return pos->pos - pos->begin;
}
static int
append_insn(struct bpf_insn new_insn, struct bpf_insn_pos *pos)
{
	if (!pos->pos)
		return -BPF_LOADER_ERRNO__PROLOGUE2BIG;

	if (pos->pos + 1 >= pos->end) {
		pr_err("bpf prologue: prologue too long\n");
		pos->pos = NULL;
		return -BPF_LOADER_ERRNO__PROLOGUE2BIG;
	}

	*(pos->pos)++ = new_insn;
	return 0;
}
static int
check_pos(struct bpf_insn_pos *pos)
{
	if (!pos->pos || pos->pos >= pos->end)
		return -BPF_LOADER_ERRNO__PROLOGUE2BIG;
	return 0;
}
/* Give it a shorter name */
#define ins(i, p) append_insn((i), (p))
/*
 * Given a register name (in 'reg'), generate an instruction that
 * loads it into eBPF register 'target_reg':
 *   'ldd target_reg, offset(ctx_reg)', where:
 * ctx_reg is pre-initialized with a pointer to 'struct pt_regs'.
 */
static int
gen_ldx_reg_from_ctx(struct bpf_insn_pos *pos, int ctx_reg,
		     const char *reg, int target_reg)
{
	int offset = regs_query_register_offset(reg);

	if (offset < 0) {
		pr_err("bpf: prologue: failed to get register %s\n",
		       reg);
		return offset;
	}
	ins(BPF_LDX_MEM(BPF_DW, target_reg, ctx_reg, offset), pos);

	return check_pos(pos);
}
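
/*
 * For example (assuming an x86_64 target, where dwarf-regs maps "bx"
 * to offsetof(struct pt_regs, bx) == 40), calling
 * gen_ldx_reg_from_ctx(pos, BPF_REG_1, "bx", BPF_REG_3) emits roughly:
 *
 *	r3 = *(u64 *)(r1 + 40)		// ldd r3, 40(r1)
 */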
/*
 * Generate a BPF_FUNC_probe_read function call.
 *
 * src_base_addr_reg is a register holding the base address,
 * dst_addr_reg is a register holding the dest address (on stack),
 * result is:
 *
 *  *[dst_addr_reg] = *([src_base_addr_reg] + offset)
 *
 * Arguments of BPF_FUNC_probe_read:
 *     ARG1: ptr to stack (dest)
 *     ARG2: size (8)
 *     ARG3: unsafe ptr (src)
 */
static int
gen_read_mem(struct bpf_insn_pos *pos,
	     int src_base_addr_reg,
	     int dst_addr_reg,
	     long offset)
{
	/* mov arg3, src_base_addr_reg */
	if (src_base_addr_reg != BPF_REG_ARG3)
		ins(BPF_MOV64_REG(BPF_REG_ARG3, src_base_addr_reg), pos);
	/* add arg3, #offset (skipped when offset is 0) */
	if (offset)
		ins(BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG3, offset), pos);

	/* mov arg2, #reg_size */
	ins(BPF_ALU64_IMM(BPF_MOV, BPF_REG_ARG2, BPF_REG_SIZE), pos);

	/* mov arg1, dst_addr_reg */
	if (dst_addr_reg != BPF_REG_ARG1)
		ins(BPF_MOV64_REG(BPF_REG_ARG1, dst_addr_reg), pos);

	/* Call probe_read */
	ins(BPF_EMIT_CALL(BPF_FUNC_probe_read), pos);
	/*
	 * Error processing: if the read fails, jump to the error code;
	 * the jump target will be relocated later to the start of the
	 * error processing code.
	 */
	ins(BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, JMP_TO_ERROR_CODE),
	    pos);

	return check_pos(pos);
}
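
/*
 * As an illustration, gen_read_mem(pos, BPF_REG_3, BPF_REG_7, 16)
 * emits roughly the following sequence (eBPF pseudo-assembly):
 *
 *	r3 += 16
 *	r2 = 8
 *	r1 = r7
 *	call probe_read
 *	jne r0, 0, <error_code>	// offset patched by prologue_relocate()
 */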
/*
 * Each arg should be a bare register. Fetch and save them into argument
 * registers (r3 - r5).
 *
 * BPF_REG_1 should have been initialized with a pointer to
 * 'struct pt_regs'.
 */
static int
gen_prologue_fastpath(struct bpf_insn_pos *pos,
		      struct probe_trace_arg *args, int nargs)
{
	int i, err;

	for (i = 0; i < nargs; i++) {
		err = gen_ldx_reg_from_ctx(pos, BPF_REG_1, args[i].value,
					   BPF_PROLOGUE_START_ARG_REG + i);
		if (err)
			return err;
	}

	return check_pos(pos);
}
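
/*
 * For example, with two bare-register args (say "si" and "di" on an
 * x86_64 target), the fast path emits just:
 *
 *	r3 = *(u64 *)(r1 + offsetof(struct pt_regs, si))
 *	r4 = *(u64 *)(r1 + offsetof(struct pt_regs, di))
 */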
/*
 * Slow path:
 *   At least one argument has the form of 'offset($rx)'.
 *
 * The following code first stores them onto the stack, then loads all
 * of them into r3 - r5.
 * Before the final loading, the result should be:
 *
 * low address
 * BPF_REG_FP - 24  ARG3
 * BPF_REG_FP - 16  ARG2
 * BPF_REG_FP - 8   ARG1
 * BPF_REG_FP
 * high address
 *
 * For each argument (described as: offn(...off2(off1(reg)))),
 * generate the following code:
 *
 * r7 <- fp
 * r7 <- r7 - stack_offset	// Ideal code should initialize r7 using
 *				// fp before generating args. However,
 *				// eBPF won't regard r7 as stack pointer
 *				// if it is generated by minus 8 from
 *				// another stack pointer except fp.
 *				// This is why we have to set r7
 *				// to fp for each variable.
 * r3 <- value of 'reg'	-> generated using gen_ldx_reg_from_ctx()
 * (r7) <- r3		// skip following instructions for bare reg
 * r3 <- r3 + off1	. // skip if off1 == 0
 * r2 <- 8		\
 * r1 <- r7		|-> generated by gen_read_mem()
 * call probe_read	/
 * jnei r0, 0, err	./
 * r3 <- (r7)
 * r3 <- r3 + off2	. // skip if off2 == 0
 * r2 <- 8		\ // r2 may be broken by probe_read, so set again
 * r1 <- r7		|-> generated by gen_read_mem()
 * call probe_read	/
 * jnei r0, 0, err	./
 * ...
 */
static int
gen_prologue_slowpath(struct bpf_insn_pos *pos,
		      struct probe_trace_arg *args, int nargs)
{
	int err, i;

	for (i = 0; i < nargs; i++) {
		struct probe_trace_arg *arg = &args[i];
		const char *reg = arg->value;
		struct probe_trace_arg_ref *ref = NULL;
		int stack_offset = (i + 1) * -8;

		pr_debug("prologue: fetch arg %d, base reg is %s\n",
			 i, reg);

		/* value of base register is stored into ARG3 */
		err = gen_ldx_reg_from_ctx(pos, BPF_REG_CTX, reg,
					   BPF_REG_ARG3);
		if (err) {
			pr_err("prologue: failed to get offset of register %s\n",
			       reg);
			goto errout;
		}

		/* Make r7 the stack pointer. */
		ins(BPF_MOV64_REG(BPF_REG_7, BPF_REG_FP), pos);
		/* r7 += stack_offset */
		ins(BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, stack_offset), pos);
		/*
		 * Store r3 (base register) onto stack
		 * Ensure fp[offset] is set.
		 * fp is the only valid base register when storing
		 * into stack. We are not allowed to use r7 as the base
		 * register yet.
		 */
		ins(BPF_STX_MEM(BPF_DW, BPF_REG_FP, BPF_REG_ARG3,
				stack_offset), pos);

		ref = arg->ref;
		while (ref) {
			pr_debug("prologue: arg %d: offset %ld\n",
				 i, ref->offset);

			err = gen_read_mem(pos, BPF_REG_3, BPF_REG_7,
					   ref->offset);
			if (err) {
				pr_err("prologue: failed to generate probe_read function call\n");
				goto errout;
			}

			ref = ref->next;
			/*
			 * Load previous result into ARG3. Use
			 * BPF_REG_FP instead of r7 because the verifier
			 * allows FP-based addressing only.
			 */
			if (ref)
				ins(BPF_LDX_MEM(BPF_DW, BPF_REG_ARG3,
						BPF_REG_FP, stack_offset), pos);
		}
	}

	/* Final pass: read to registers */
	for (i = 0; i < nargs; i++)
		ins(BPF_LDX_MEM(BPF_DW, BPF_PROLOGUE_START_ARG_REG + i,
				BPF_REG_FP, -BPF_REG_SIZE * (i + 1)), pos);

	ins(BPF_JMP_IMM(BPF_JA, BPF_REG_0, 0, JMP_TO_SUCCESS_CODE), pos);

	return check_pos(pos);
errout:
	return err;
}
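
/*
 * As a concrete (hypothetical) example, an argument '+8(%bx)' with one
 * level of dereference would be fetched on an x86_64 target roughly as:
 *
 *	r6 = r1			// ctx saved to a callee-saved register
 *	r3 = *(u64 *)(r6 + offsetof(struct pt_regs, bx))
 *	r7 = fp
 *	r7 += -8
 *	*(u64 *)(fp - 8) = r3
 *	r3 += 8			// off1 == 8
 *	r2 = 8
 *	r1 = r7
 *	call probe_read
 *	jne r0, 0, <error_code>
 *	r3 = *(u64 *)(fp - 8)	// final pass: load result into r3
 */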
static int
prologue_relocate(struct bpf_insn_pos *pos, struct bpf_insn *error_code,
		  struct bpf_insn *success_code, struct bpf_insn *user_code)
{
	struct bpf_insn *insn;

	if (check_pos(pos))
		return -BPF_LOADER_ERRNO__PROLOGUE2BIG;

	for (insn = pos->begin; insn < pos->pos; insn++) {
		struct bpf_insn *target;
		u8 class = BPF_CLASS(insn->code);
		u8 opcode;

		if (class != BPF_JMP)
			continue;
		opcode = BPF_OP(insn->code);
		if (opcode == BPF_CALL)
			continue;

		switch (insn->off) {
		case JMP_TO_ERROR_CODE:
			target = error_code;
			break;
		case JMP_TO_SUCCESS_CODE:
			target = success_code;
			break;
		case JMP_TO_USER_CODE:
			target = user_code;
			break;
		default:
			pr_err("bpf prologue: internal error: relocation failed\n");
			return -BPF_LOADER_ERRNO__PROLOGUE;
		}

		insn->off = target - (insn + 1);
	}
	return 0;
}
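
/*
 * Note: an eBPF jump offset is relative to the instruction that
 * follows the jump, which is why the relocation above computes
 * 'target - (insn + 1)' rather than 'target - insn'.
 */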
int bpf__gen_prologue(struct probe_trace_arg *args, int nargs,
		      struct bpf_insn *new_prog, size_t *new_cnt,
		      size_t cnt_space)
{
	struct bpf_insn *success_code = NULL;
	struct bpf_insn *error_code = NULL;
	struct bpf_insn *user_code = NULL;
	struct bpf_insn_pos pos;
	bool fastpath = true;
	int err = 0, i;

	if (!new_prog || !new_cnt)
		return -EINVAL;

	if (cnt_space > BPF_MAXINSNS)
		cnt_space = BPF_MAXINSNS;

	pos.begin = new_prog;
	pos.end = new_prog + cnt_space;
	pos.pos = new_prog;

	if (!nargs) {
		ins(BPF_ALU64_IMM(BPF_MOV, BPF_PROLOGUE_FETCH_RESULT_REG, 0),
		    &pos);

		if (check_pos(&pos))
			goto errout;

		*new_cnt = pos_get_cnt(&pos);
		return 0;
	}

	if (nargs > BPF_PROLOGUE_MAX_ARGS) {
		pr_warning("bpf: prologue: %d arguments are dropped\n",
			   nargs - BPF_PROLOGUE_MAX_ARGS);
		nargs = BPF_PROLOGUE_MAX_ARGS;
	}

	/* First pass: validation */
	for (i = 0; i < nargs; i++) {
		struct probe_trace_arg_ref *ref = args[i].ref;

		if (args[i].value[0] == '@') {
			/* TODO: fetch global variable */
			pr_err("bpf: prologue: global %s%+ld not supported\n",
			       args[i].value, ref ? ref->offset : 0l);
			return -ENOTSUP;
		}

		while (ref) {
			/* fastpath is true if all args have ref == NULL */
			fastpath = false;

			/*
			 * The instruction encodes the immediate value
			 * using s32, while ref->offset is a long. On
			 * systems where a long can't fit in an s32,
			 * refuse to process if ref->offset is too
			 * large (or too small).
			 */
#define OFFSET_MAX	((1LL << 31) - 1)
#define OFFSET_MIN	((1LL << 31) * -1)
			if (ref->offset > OFFSET_MAX ||
			    ref->offset < OFFSET_MIN) {
				pr_err("bpf: prologue: offset out of bound: %ld\n",
				       ref->offset);
				return -BPF_LOADER_ERRNO__PROLOGUEOOB;
			}
			ref = ref->next;
		}
	}
	pr_debug("prologue: pass validation\n");

	if (fastpath) {
		/* If all variables are registers... */
		pr_debug("prologue: fast path\n");
		err = gen_prologue_fastpath(&pos, args, nargs);
		if (err)
			goto errout;
	} else {
		pr_debug("prologue: slow path\n");

		/* Initialization: move ctx to a callee-saved register. */
		ins(BPF_MOV64_REG(BPF_REG_CTX, BPF_REG_ARG1), &pos);

		err = gen_prologue_slowpath(&pos, args, nargs);
		if (err)
			goto errout;
		/*
		 * start of ERROR_CODE (only the slow path needs error code)
		 *	mov r2 <- 1  // r2 is the error number
		 *	mov r3 <- 0  // r3, r4... should be touched or
		 *		     // the verifier would complain
		 *	mov r4 <- 0
		 *	...
		 *	goto usercode
		 */
		error_code = pos.pos;
		ins(BPF_ALU64_IMM(BPF_MOV, BPF_PROLOGUE_FETCH_RESULT_REG, 1),
		    &pos);

		for (i = 0; i < nargs; i++)
			ins(BPF_ALU64_IMM(BPF_MOV,
					  BPF_PROLOGUE_START_ARG_REG + i,
					  0),
			    &pos);
		ins(BPF_JMP_IMM(BPF_JA, BPF_REG_0, 0, JMP_TO_USER_CODE),
		    &pos);
	}

	/*
	 * start of SUCCESS_CODE:
	 *	mov r2 <- 0
	 *	goto usercode  // skip
	 */
	success_code = pos.pos;
	ins(BPF_ALU64_IMM(BPF_MOV, BPF_PROLOGUE_FETCH_RESULT_REG, 0), &pos);

	/*
	 * start of USER_CODE:
	 *	Restore ctx to r1
	 */
	user_code = pos.pos;
	if (!fastpath) {
		/*
		 * Only the slow path needs restoring of ctx. In the fast
		 * path, registers are loaded directly from r1.
		 */
		ins(BPF_MOV64_REG(BPF_REG_ARG1, BPF_REG_CTX), &pos);
		err = prologue_relocate(&pos, error_code, success_code,
					user_code);
		if (err)
			goto errout;
	}

	err = check_pos(&pos);
	if (err)
		goto errout;

	*new_cnt = pos_get_cnt(&pos);
	return 0;
errout:
	return err;
}
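
/*
 * A minimal usage sketch (hypothetical caller; names are illustrative):
 * the caller reserves room at the head of the final program, generates
 * the prologue into it, then places the original instructions right
 * after the prologue:
 *
 *	struct bpf_insn buf[BPF_MAXINSNS];
 *	size_t prologue_cnt = 0;
 *	int err;
 *
 *	err = bpf__gen_prologue(args, nargs, buf, &prologue_cnt,
 *				BPF_MAXINSNS - orig_insns_cnt);
 *	if (!err)
 *		memcpy(buf + prologue_cnt, orig_insns,
 *		       sizeof(struct bpf_insn) * orig_insns_cnt);
 */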