/*
 * Testsuite for eBPF verifier
 *
 * Copyright (c) 2014 PLUMgrid, http://plumgrid.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 */
#include <stdio.h>
#include <unistd.h>
#include <errno.h>
#include <string.h>
#include <stdbool.h>
#include <stddef.h>
#include <linux/bpf.h>
#include <linux/unistd.h>
#include <linux/filter.h>
#include <sys/resource.h>
#include "libbpf.h"	/* local helpers: bpf_create_map(), bpf_prog_load(), bpf_log_buf */

#define ARRAY_SIZE(x) (sizeof(x) / sizeof(*(x)))
struct bpf_test {
	const char *descr;
	struct bpf_insn insns[MAX_INSNS];
	int fixup[MAX_FIXUPS];
	int prog_array_fixup[MAX_FIXUPS];
	const char *errstr;
	const char *errstr_unpriv;
	enum { UNDEF, ACCEPT, REJECT } result, result_unpriv;
	enum bpf_prog_type prog_type;
};
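/* A prog_type of 0 means "not set"; the loader below falls back to
 * BPF_PROG_TYPE_SOCKET_FILTER in that case (see the bpf_prog_load() call
 * in test()).
 */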
static struct bpf_test tests[] = {
		BPF_MOV64_IMM(BPF_REG_1, 1),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 2),
		BPF_MOV64_IMM(BPF_REG_2, 3),
		BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_2),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -1),
		BPF_ALU64_IMM(BPF_MUL, BPF_REG_1, 3),
		BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
	.errstr = "unreachable",
		BPF_JMP_IMM(BPF_JA, 0, 0, 1),
		BPF_JMP_IMM(BPF_JA, 0, 0, 0),
	.errstr = "unreachable",
		BPF_JMP_IMM(BPF_JA, 0, 0, 1),
	.errstr = "jump out of range",
		BPF_JMP_IMM(BPF_JA, 0, 0, -2),
	.errstr = "jump out of range",
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
		BPF_LD_IMM64(BPF_REG_0, 0),
		BPF_LD_IMM64(BPF_REG_0, 0),
		BPF_LD_IMM64(BPF_REG_0, 1),
		BPF_LD_IMM64(BPF_REG_0, 1),
		BPF_MOV64_IMM(BPF_REG_0, 2),
	.errstr = "invalid BPF_LD_IMM insn",
	.errstr_unpriv = "R1 pointer comparison",
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
		BPF_LD_IMM64(BPF_REG_0, 0),
		BPF_LD_IMM64(BPF_REG_0, 0),
		BPF_LD_IMM64(BPF_REG_0, 1),
		BPF_LD_IMM64(BPF_REG_0, 1),
	.errstr = "invalid BPF_LD_IMM insn",
	.errstr_unpriv = "R1 pointer comparison",
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
		BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
		BPF_LD_IMM64(BPF_REG_0, 0),
		BPF_LD_IMM64(BPF_REG_0, 0),
		BPF_LD_IMM64(BPF_REG_0, 1),
		BPF_LD_IMM64(BPF_REG_0, 1),
	.errstr = "invalid bpf_ld_imm64 insn",
		BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
	.errstr = "invalid bpf_ld_imm64 insn",
		BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
	.errstr = "invalid bpf_ld_imm64 insn",
		BPF_ALU64_REG(BPF_MOV, BPF_REG_0, BPF_REG_2),
	.errstr = "jump out of range",
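	/* The verifier requires a loop-free control-flow graph; any jump back
	 * to an earlier instruction (including a jump to self) is rejected
	 * as a "back-edge", as the next tests demonstrate.
	 */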
		BPF_JMP_IMM(BPF_JA, 0, 0, -1),
	.errstr = "back-edge",
		BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
		BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
		BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
		BPF_JMP_IMM(BPF_JA, 0, 0, -4),
	.errstr = "back-edge",
		BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
		BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
		BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -3),
	.errstr = "back-edge",
	"read uninitialized register",
		BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
	.errstr = "R2 !read_ok",
	"read invalid register",
		BPF_MOV64_REG(BPF_REG_0, -1),
	.errstr = "R15 is invalid",
	"program doesn't init R0 before exit",
		BPF_ALU64_REG(BPF_MOV, BPF_REG_2, BPF_REG_1),
	.errstr = "R0 !read_ok",
	"program doesn't init R0 before exit in all branches",
		BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
		BPF_MOV64_IMM(BPF_REG_0, 1),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 2),
	.errstr = "R0 !read_ok",
	.errstr_unpriv = "R1 pointer comparison",
	"stack out of bounds",
		BPF_ST_MEM(BPF_DW, BPF_REG_10, 8, 0),
	.errstr = "invalid stack",
	"invalid call insn1",
		BPF_RAW_INSN(BPF_JMP | BPF_CALL | BPF_X, 0, 0, 0, 0),
	.errstr = "BPF_CALL uses reserved",
	"invalid call insn2",
		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 1, 0),
	.errstr = "BPF_CALL uses reserved",
	"invalid function call",
		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 1234567),
	.errstr = "invalid func 1234567",
	"uninitialized stack1",
		BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
		BPF_LD_MAP_FD(BPF_REG_1, 0),
		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	.errstr = "invalid indirect read from stack",
	"uninitialized stack2",
		BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
		BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, -8),
	.errstr = "invalid read from stack",
	"check valid spill/fill",
		/* spill R1(ctx) into stack */
		BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
		/* fill it back into R2 */
		BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -8),
		/* should be able to access R0 = *(R2 + 8) */
		/* BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 8), */
		BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
	.errstr_unpriv = "R0 leaks addr",
	.result_unpriv = REJECT,
	"check corrupted spill/fill",
		/* spill R1(ctx) into stack */
		BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
		/* mess up with R1 pointer on stack */
		BPF_ST_MEM(BPF_B, BPF_REG_10, -7, 0x23),
		/* fill back into R0 should fail */
		BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
	.errstr_unpriv = "attempt to corrupt spilled",
	.errstr = "corrupted spill",
	"invalid src register in STX",
		BPF_STX_MEM(BPF_B, BPF_REG_10, -1, -1),
	.errstr = "R15 is invalid",
	"invalid dst register in STX",
		BPF_STX_MEM(BPF_B, 14, BPF_REG_10, -1),
	.errstr = "R14 is invalid",
	"invalid dst register in ST",
		BPF_ST_MEM(BPF_B, 14, -1, -1),
	.errstr = "R14 is invalid",
	"invalid src register in LDX",
		BPF_LDX_MEM(BPF_B, BPF_REG_0, 12, 0),
	.errstr = "R12 is invalid",
	"invalid dst register in LDX",
		BPF_LDX_MEM(BPF_B, 11, BPF_REG_1, 0),
	.errstr = "R11 is invalid",
		BPF_RAW_INSN(0, 0, 0, 0, 0),
	.errstr = "invalid BPF_LD_IMM",
		BPF_RAW_INSN(1, 0, 0, 0, 0),
	.errstr = "BPF_LDX uses reserved fields",
		BPF_RAW_INSN(-1, 0, 0, 0, 0),
	.errstr = "invalid BPF_ALU opcode f0",
		BPF_RAW_INSN(-1, -1, -1, -1, -1),
	.errstr = "invalid BPF_ALU opcode f0",
		BPF_RAW_INSN(0x7f, -1, -1, -1, -1),
	.errstr = "BPF_ALU uses reserved fields",
	"misaligned read from stack",
		BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
		BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, -4),
	.errstr = "misaligned access",
	"invalid map_fd for function call",
		BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
		BPF_ALU64_REG(BPF_MOV, BPF_REG_2, BPF_REG_10),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
		BPF_LD_MAP_FD(BPF_REG_1, 0),
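		/* this test has no .fixup entry, so the fd loaded here stays 0
		 * and the call below is rejected for not referencing a real map
		 */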
		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_delete_elem),
	.errstr = "fd 0 is not pointing to valid bpf_map",
	"don't check return value before access",
		BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
		BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
		BPF_LD_MAP_FD(BPF_REG_1, 0),
		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
		BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
	.errstr = "R0 invalid mem access 'map_value_or_null'",
	"access memory with incorrect alignment",
		BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
		BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
		BPF_LD_MAP_FD(BPF_REG_1, 0),
		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
		BPF_ST_MEM(BPF_DW, BPF_REG_0, 4, 0),
	.errstr = "misaligned access",
	"sometimes access memory with incorrect alignment",
		BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
		BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
		BPF_LD_MAP_FD(BPF_REG_1, 0),
		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
		BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
		BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 1),
	.errstr = "R0 invalid mem access",
	.errstr_unpriv = "R0 leaks addr",
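	/* The jump tests below chain many conditional branches and stack
	 * stores at different offsets, forcing the verifier to walk a large
	 * number of execution paths.
	 */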
		BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
		BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -8),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
		BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 1),
		BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 1),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 1),
		BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 2),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 1),
		BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 3),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 1),
		BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 4),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 1),
		BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 5),
		BPF_MOV64_IMM(BPF_REG_0, 0),
	.errstr_unpriv = "R1 pointer comparison",
	.result_unpriv = REJECT,
		BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 2),
		BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
		BPF_JMP_IMM(BPF_JA, 0, 0, 14),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 2),
		BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 0),
		BPF_JMP_IMM(BPF_JA, 0, 0, 11),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 2),
		BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 0),
		BPF_JMP_IMM(BPF_JA, 0, 0, 8),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 2),
		BPF_ST_MEM(BPF_DW, BPF_REG_2, -40, 0),
		BPF_JMP_IMM(BPF_JA, 0, 0, 5),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 2),
		BPF_ST_MEM(BPF_DW, BPF_REG_2, -48, 0),
		BPF_JMP_IMM(BPF_JA, 0, 0, 2),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 1),
		BPF_ST_MEM(BPF_DW, BPF_REG_2, -56, 0),
		BPF_MOV64_IMM(BPF_REG_0, 0),
	.errstr_unpriv = "R1 pointer comparison",
	.result_unpriv = REJECT,
		BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
		BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
		BPF_JMP_IMM(BPF_JA, 0, 0, 19),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 3),
		BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 0),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
		BPF_JMP_IMM(BPF_JA, 0, 0, 15),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 3),
		BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 0),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -32),
		BPF_JMP_IMM(BPF_JA, 0, 0, 11),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 3),
		BPF_ST_MEM(BPF_DW, BPF_REG_2, -40, 0),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -40),
		BPF_JMP_IMM(BPF_JA, 0, 0, 7),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 3),
		BPF_ST_MEM(BPF_DW, BPF_REG_2, -48, 0),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -48),
		BPF_JMP_IMM(BPF_JA, 0, 0, 3),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 0),
		BPF_ST_MEM(BPF_DW, BPF_REG_2, -56, 0),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -56),
		BPF_LD_MAP_FD(BPF_REG_1, 0),
		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_delete_elem),
	.errstr_unpriv = "R1 pointer comparison",
	.result_unpriv = REJECT,
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
		BPF_MOV64_IMM(BPF_REG_0, 0),
	.errstr_unpriv = "R1 pointer comparison",
	.result_unpriv = REJECT,
		BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
		BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
		BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
		BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
		BPF_JMP_IMM(BPF_JA, 0, 0, 2),
		BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
		BPF_JMP_IMM(BPF_JA, 0, 0, 0),
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
		BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
		BPF_JMP_IMM(BPF_JA, 0, 0, 2),
		BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
		BPF_JMP_IMM(BPF_JA, 0, 0, 0),
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
		BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
		BPF_JMP_IMM(BPF_JA, 0, 0, 2),
		BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
		BPF_JMP_IMM(BPF_JA, 0, 0, 0),
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
		BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
		BPF_JMP_IMM(BPF_JA, 0, 0, 2),
		BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
		BPF_JMP_IMM(BPF_JA, 0, 0, 0),
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
		BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
		BPF_JMP_IMM(BPF_JA, 0, 0, 2),
		BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
		BPF_JMP_IMM(BPF_JA, 0, 0, 0),
		BPF_MOV64_IMM(BPF_REG_0, 0),
	.errstr_unpriv = "R1 pointer comparison",
	.result_unpriv = REJECT,
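	/* The skb tests below access the program context (struct __sk_buff).
	 * The verifier only allows in-bounds, properly sized accesses to the
	 * fields it knows about, and whether a field is writeable depends on
	 * the program type.
	 */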
672 "access skb fields ok",
674 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
675 offsetof(struct __sk_buff, len)),
676 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
677 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
678 offsetof(struct __sk_buff, mark)),
679 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
680 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
681 offsetof(struct __sk_buff, pkt_type)),
682 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
683 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
684 offsetof(struct __sk_buff, queue_mapping)),
685 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
686 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
687 offsetof(struct __sk_buff, protocol)),
688 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
689 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
690 offsetof(struct __sk_buff, vlan_present)),
691 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
692 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
693 offsetof(struct __sk_buff, vlan_tci)),
694 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
700 "access skb fields bad1",
702 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -4),
705 .errstr = "invalid bpf_context access",
709 "access skb fields bad2",
711 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 9),
712 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
713 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
714 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
715 BPF_LD_MAP_FD(BPF_REG_1, 0),
716 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
717 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
719 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
720 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
721 offsetof(struct __sk_buff, pkt_type)),
725 .errstr = "different pointers",
726 .errstr_unpriv = "R1 pointer comparison",
730 "access skb fields bad3",
732 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
733 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
734 offsetof(struct __sk_buff, pkt_type)),
736 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
737 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
738 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
739 BPF_LD_MAP_FD(BPF_REG_1, 0),
740 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
741 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
743 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
744 BPF_JMP_IMM(BPF_JA, 0, 0, -12),
747 .errstr = "different pointers",
748 .errstr_unpriv = "R1 pointer comparison",
752 "access skb fields bad4",
754 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 3),
755 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
756 offsetof(struct __sk_buff, len)),
757 BPF_MOV64_IMM(BPF_REG_0, 0),
759 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
760 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
761 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
762 BPF_LD_MAP_FD(BPF_REG_1, 0),
763 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
764 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
766 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
767 BPF_JMP_IMM(BPF_JA, 0, 0, -13),
770 .errstr = "different pointers",
771 .errstr_unpriv = "R1 pointer comparison",
775 "check skb->mark is not writeable by sockets",
777 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
778 offsetof(struct __sk_buff, mark)),
781 .errstr = "invalid bpf_context access",
782 .errstr_unpriv = "R1 leaks addr",
786 "check skb->tc_index is not writeable by sockets",
788 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
789 offsetof(struct __sk_buff, tc_index)),
792 .errstr = "invalid bpf_context access",
793 .errstr_unpriv = "R1 leaks addr",
797 "check non-u32 access to cb",
799 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_1,
800 offsetof(struct __sk_buff, cb[0])),
803 .errstr = "invalid bpf_context access",
804 .errstr_unpriv = "R1 leaks addr",
808 "check out of range skb->cb access",
810 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
811 offsetof(struct __sk_buff, cb[0]) + 256),
814 .errstr = "invalid bpf_context access",
817 .prog_type = BPF_PROG_TYPE_SCHED_ACT,
820 "write skb fields from socket prog",
822 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
823 offsetof(struct __sk_buff, cb[4])),
824 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
825 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
826 offsetof(struct __sk_buff, mark)),
827 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
828 offsetof(struct __sk_buff, tc_index)),
829 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
830 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
831 offsetof(struct __sk_buff, cb[0])),
832 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
833 offsetof(struct __sk_buff, cb[2])),
837 .errstr_unpriv = "R1 leaks addr",
838 .result_unpriv = REJECT,
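	/* The same mark/tc_index fields that are read-only for socket filters
	 * become writeable when the program type is TC (SCHED_CLS/SCHED_ACT),
	 * as the next test shows.
	 */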
841 "write skb fields from tc_cls_act prog",
843 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
844 offsetof(struct __sk_buff, cb[0])),
845 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
846 offsetof(struct __sk_buff, mark)),
847 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
848 offsetof(struct __sk_buff, tc_index)),
849 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
850 offsetof(struct __sk_buff, tc_index)),
851 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
852 offsetof(struct __sk_buff, cb[3])),
856 .result_unpriv = REJECT,
858 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
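	/* PTR_TO_STACK tests: loads and stores through a pointer derived from
	 * the frame pointer (R10) must stay within the stack frame and be
	 * naturally aligned for the access size.
	 */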
861 "PTR_TO_STACK store/load",
863 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
864 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -10),
865 BPF_ST_MEM(BPF_DW, BPF_REG_1, 2, 0xfaceb00c),
866 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 2),
872 "PTR_TO_STACK store/load - bad alignment on off",
874 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
875 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
876 BPF_ST_MEM(BPF_DW, BPF_REG_1, 2, 0xfaceb00c),
877 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 2),
881 .errstr = "misaligned access off -6 size 8",
884 "PTR_TO_STACK store/load - bad alignment on reg",
886 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
887 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -10),
888 BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
889 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
893 .errstr = "misaligned access off -2 size 8",
896 "PTR_TO_STACK store/load - out of bounds low",
898 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
899 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -80000),
900 BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
901 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
905 .errstr = "invalid stack off=-79992 size=8",
908 "PTR_TO_STACK store/load - out of bounds high",
910 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
911 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
912 BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
913 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
917 .errstr = "invalid stack off=0 size=8",
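	/* The "unpriv:" tests document the extra restrictions applied when the
	 * loader is not root: pointer arithmetic, pointer comparisons and
	 * anything that could leak a kernel address into user-visible state
	 * are rejected (result_unpriv = REJECT), independent of the normal
	 * verifier checks.
	 */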
920 "unpriv: return pointer",
922 BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
926 .result_unpriv = REJECT,
927 .errstr_unpriv = "R0 leaks addr",
930 "unpriv: add const to pointer",
932 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
933 BPF_MOV64_IMM(BPF_REG_0, 0),
937 .result_unpriv = REJECT,
938 .errstr_unpriv = "R1 pointer arithmetic",
941 "unpriv: add pointer to pointer",
943 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_10),
944 BPF_MOV64_IMM(BPF_REG_0, 0),
948 .result_unpriv = REJECT,
949 .errstr_unpriv = "R1 pointer arithmetic",
952 "unpriv: neg pointer",
954 BPF_ALU64_IMM(BPF_NEG, BPF_REG_1, 0),
955 BPF_MOV64_IMM(BPF_REG_0, 0),
959 .result_unpriv = REJECT,
960 .errstr_unpriv = "R1 pointer arithmetic",
963 "unpriv: cmp pointer with const",
965 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0),
966 BPF_MOV64_IMM(BPF_REG_0, 0),
970 .result_unpriv = REJECT,
971 .errstr_unpriv = "R1 pointer comparison",
974 "unpriv: cmp pointer with pointer",
976 BPF_JMP_REG(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
977 BPF_MOV64_IMM(BPF_REG_0, 0),
981 .result_unpriv = REJECT,
982 .errstr_unpriv = "R10 pointer comparison",
985 "unpriv: check that printk is disallowed",
		BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
		BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
		BPF_MOV64_IMM(BPF_REG_2, 8),
		BPF_MOV64_REG(BPF_REG_3, BPF_REG_1),
		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_trace_printk),
		BPF_MOV64_IMM(BPF_REG_0, 0),
	.errstr_unpriv = "unknown func 6",
	.result_unpriv = REJECT,
	"unpriv: pass pointer to helper function",
		BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
		BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
		BPF_LD_MAP_FD(BPF_REG_1, 0),
		BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
		BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_update_elem),
		BPF_MOV64_IMM(BPF_REG_0, 0),
	.errstr_unpriv = "R4 leaks addr",
	.result_unpriv = REJECT,
	"unpriv: indirectly pass pointer on stack to helper function",
		BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
		BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
		BPF_LD_MAP_FD(BPF_REG_1, 0),
		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
		BPF_MOV64_IMM(BPF_REG_0, 0),
	.errstr = "invalid indirect read from stack off -8+0 size 8",
	"unpriv: mangle pointer on stack 1",
		BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
		BPF_ST_MEM(BPF_W, BPF_REG_10, -8, 0),
		BPF_MOV64_IMM(BPF_REG_0, 0),
	.errstr_unpriv = "attempt to corrupt spilled",
	.result_unpriv = REJECT,
	"unpriv: mangle pointer on stack 2",
		BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
		BPF_ST_MEM(BPF_B, BPF_REG_10, -1, 0),
		BPF_MOV64_IMM(BPF_REG_0, 0),
	.errstr_unpriv = "attempt to corrupt spilled",
	.result_unpriv = REJECT,
	"unpriv: read pointer from stack in small chunks",
		BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
		BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -8),
		BPF_MOV64_IMM(BPF_REG_0, 0),
	.errstr = "invalid size",
	"unpriv: write pointer into ctx",
		BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, 0),
		BPF_MOV64_IMM(BPF_REG_0, 0),
	.errstr_unpriv = "R1 leaks addr",
	.result_unpriv = REJECT,
	.errstr = "invalid bpf_context access",
	"unpriv: write pointer into map elem value",
		BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
		BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
		BPF_LD_MAP_FD(BPF_REG_1, 0),
		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
		BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
	.errstr_unpriv = "R0 leaks addr",
	.result_unpriv = REJECT,
	"unpriv: partial copy of pointer",
		BPF_MOV32_REG(BPF_REG_1, BPF_REG_10),
		BPF_MOV64_IMM(BPF_REG_0, 0),
	.errstr_unpriv = "R10 partial copy",
	.result_unpriv = REJECT,
	"unpriv: pass pointer to tail_call",
		BPF_MOV64_REG(BPF_REG_3, BPF_REG_1),
		BPF_LD_MAP_FD(BPF_REG_2, 0),
		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call),
		BPF_MOV64_IMM(BPF_REG_0, 0),
	.prog_array_fixup = {1},
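	/* instruction 1 (the BPF_LD_MAP_FD above) is patched by the test
	 * harness with the fd of a real BPF_MAP_TYPE_PROG_ARRAY map, see
	 * create_prog_array() below
	 */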
	.errstr_unpriv = "R3 leaks addr into helper",
	.result_unpriv = REJECT,
	"unpriv: cmp map pointer with zero",
		BPF_MOV64_IMM(BPF_REG_1, 0),
		BPF_LD_MAP_FD(BPF_REG_1, 0),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0),
		BPF_MOV64_IMM(BPF_REG_0, 0),
	.errstr_unpriv = "R1 pointer comparison",
	.result_unpriv = REJECT,
	"unpriv: write into frame pointer",
		BPF_MOV64_REG(BPF_REG_10, BPF_REG_1),
		BPF_MOV64_IMM(BPF_REG_0, 0),
	.errstr = "frame pointer is read only",
	"unpriv: cmp of frame pointer",
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_10, 0, 0),
		BPF_MOV64_IMM(BPF_REG_0, 0),
	.errstr_unpriv = "R10 pointer comparison",
	.result_unpriv = REJECT,
	"unpriv: cmp of stack pointer",
		BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_2, 0, 0),
		BPF_MOV64_IMM(BPF_REG_0, 0),
	.errstr_unpriv = "R2 pointer comparison",
	.result_unpriv = REJECT,
	"unpriv: obfuscate stack pointer",
		BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
		BPF_MOV64_IMM(BPF_REG_0, 0),
	.errstr_unpriv = "R2 pointer arithmetic",
	.result_unpriv = REJECT,
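/* Determine a test program's length by scanning the fixed-size insns[]
 * array backwards for the last non-zero instruction.
 */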
static int probe_filter_length(struct bpf_insn *fp)
	for (len = MAX_INSNS - 1; len > 0; --len)
		if (fp[len].code != 0 || fp[len].imm != 0)
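/* Create a small hash map (8-byte keys and values) whose fd is patched into
 * tests that reference a map via .fixup.
 */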
static int create_map(void)
	map_fd = bpf_create_map(BPF_MAP_TYPE_HASH,
				sizeof(long long), sizeof(long long), 1024);
		printf("failed to create map '%s'\n", strerror(errno));
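/* Create a BPF_MAP_TYPE_PROG_ARRAY with 4 slots; its fd is patched into the
 * tail_call test via .prog_array_fixup.
 */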
static int create_prog_array(void)
	map_fd = bpf_create_map(BPF_MAP_TYPE_PROG_ARRAY,
				sizeof(int), sizeof(int), 4);
		printf("failed to create prog_array '%s'\n", strerror(errno));
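/* Load every test program and compare the verifier's verdict (and, where
 * set, its log message) with the expectations in the table; when running
 * as non-root, the *_unpriv expectations take precedence where present.
 */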
static int test(void)
	int prog_fd, i, pass_cnt = 0, err_cnt = 0;
	bool unpriv = geteuid() != 0;

	for (i = 0; i < ARRAY_SIZE(tests); i++) {
		struct bpf_insn *prog = tests[i].insns;
		int prog_type = tests[i].prog_type;
		int prog_len = probe_filter_length(prog);
		int *fixup = tests[i].fixup;
		int *prog_array_fixup = tests[i].prog_array_fixup;
		int expected_result;
		const char *expected_errstr;
		int map_fd = -1, prog_array_fd = -1;

			map_fd = create_map();
				prog[*fixup].imm = map_fd;

		if (*prog_array_fixup) {
			prog_array_fd = create_prog_array();
				prog[*prog_array_fixup].imm = prog_array_fd;
			} while (*prog_array_fixup);

		printf("#%d %s ", i, tests[i].descr);

		prog_fd = bpf_prog_load(prog_type ?: BPF_PROG_TYPE_SOCKET_FILTER,
					prog, prog_len * sizeof(struct bpf_insn),

		if (unpriv && tests[i].result_unpriv != UNDEF)
			expected_result = tests[i].result_unpriv;
			expected_result = tests[i].result;

		if (unpriv && tests[i].errstr_unpriv)
			expected_errstr = tests[i].errstr_unpriv;
			expected_errstr = tests[i].errstr;

		if (expected_result == ACCEPT) {
				printf("FAIL\nfailed to load prog '%s'\n",
				printf("%s", bpf_log_buf);
				printf("FAIL\nunexpected success to load\n");
				printf("%s", bpf_log_buf);
			if (strstr(bpf_log_buf, expected_errstr) == 0) {
				printf("FAIL\nunexpected error message: %s",

		if (prog_array_fd >= 0)
			close(prog_array_fd);

	printf("Summary: %d PASSED, %d FAILED\n", pass_cnt, err_cnt);
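/* main(): bump RLIMIT_MEMLOCK (to 1 MB here) before creating maps and
 * loading programs, so the kernel's locked-memory accounting for BPF
 * objects does not make the tests fail spuriously.
 */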
	struct rlimit r = {1 << 20, 1 << 20};

	setrlimit(RLIMIT_MEMLOCK, &r);